blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aca0f8da76438bb92344a411b386bae33b859c61 | 804d40b874e2eb1f2e9f3f3f124d507bf2b517f1 | /env/Lib/site-packages/sqlalchemy/dialects/mysql/json.py | 4961ef33b155b194515e5e298d98eb38f3cc976d | [] | no_license | Nestor-Leyva/api-flask | 86d5d3053e62767813aeacea5f30cc6a355320d0 | 55675a02fd79263518b0dfc731a2b4a2be50bd0d | refs/heads/main | 2023-08-21T03:00:18.740097 | 2021-10-04T19:25:38 | 2021-10-04T19:25:38 | 413,517,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | # mysql/json.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
from ... import types as sqltypes
class JSON(sqltypes.JSON):
"""MySQL JSON type.
MySQL supports JSON as of version 5.7.
MariaDB supports JSON (as an alias for LONGTEXT) as of version 10.2.
The :class:`.mysql.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function at the database level.
.. versionadded:: 1.1
"""
pass
class _FormatTypeMixin(object):
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
def _format_value(self, value):
if isinstance(value, int):
value = "$[%s]" % value
else:
value = '$."%s"' % value
return value
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
def _format_value(self, value):
return "$%s" % (
"".join(
[
"[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
for elem in value
]
)
)
| [
"noreply@github.com"
] | Nestor-Leyva.noreply@github.com |
463affe2ad1841699dfb96a3668f21e2c37af98e | 0ce68cc0e9b93ae339e69f9f328e27262ebe0ab9 | /art/attacks/inference/membership_inference/label_only_boundary_distance.py | be29c3e16d90254053b6f3121a0fd6f4623acf20 | [
"MIT"
] | permissive | igor-barinov/adversarial-robustness-toolbox | ede762bafa471d0d0664e82649f35bf0455c0d9a | 10518daca0d5f2eb3bcd64022c2151cadc843443 | refs/heads/main | 2023-07-16T08:36:51.500788 | 2021-07-14T15:19:45 | 2021-07-14T15:19:45 | 376,598,416 | 1 | 0 | MIT | 2021-06-13T17:09:16 | 2021-06-13T17:09:15 | null | UTF-8 | Python | false | false | 8,046 | py | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the Label-Only Inference Attack based on Decision Boundary.
| Paper link: https://arxiv.org/abs/2007.14321
"""
import logging
from typing import Optional, TYPE_CHECKING
import numpy as np
from art.attacks.attack import InferenceAttack
from art.estimators.estimator import BaseEstimator
from art.estimators.classification.classifier import ClassifierMixin
from art.utils import check_and_transform_label_format
if TYPE_CHECKING:
from art.utils import CLASSIFIER_TYPE
logger = logging.getLogger(__name__)
class LabelOnlyDecisionBoundary(InferenceAttack):
"""
Implementation of Label-Only Inference Attack based on Decision Boundary.
| Paper link: https://arxiv.org/abs/2007.14321
"""
attack_params = InferenceAttack.attack_params + [
"distance_threshold_tau",
]
_estimator_requirements = (BaseEstimator, ClassifierMixin)
def __init__(self, estimator: "CLASSIFIER_TYPE", distance_threshold_tau: Optional[float] = None):
"""
Create a `LabelOnlyDecisionBoundary` instance for Label-Only Inference Attack based on Decision Boundary.
:param estimator: A trained classification estimator.
:param distance_threshold_tau: Threshold distance for decision boundary. Samples with boundary distances larger
than threshold are considered members of the training dataset.
"""
super().__init__(estimator=estimator)
self.distance_threshold_tau = distance_threshold_tau
self._check_params()
def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
"""
Infer membership of input `x` in estimator's training data.
:param x: Input data.
:param y: True labels for `x`.
:Keyword Arguments for HopSkipJump:
* *norm*: Order of the norm. Possible values: "inf", np.inf or 2.
* *max_iter*: Maximum number of iterations.
* *max_eval*: Maximum number of evaluations for estimating gradient.
* *init_eval*: Initial number of evaluations for estimating gradient.
* *init_size*: Maximum number of trials for initial generation of adversarial examples.
* *verbose*: Show progress bars.
:return: An array holding the inferred membership status, 1 indicates a member and 0 indicates non-member.
"""
from art.attacks.evasion.hop_skip_jump import HopSkipJump
if y is None:
raise ValueError("Argument `y` is None, but this attack requires true labels `y` to be provided.")
if self.distance_threshold_tau is None:
raise ValueError(
"No value for distance threshold `distance_threshold_tau` provided. Please set"
"`distance_threshold_tau` or run method `calibrate_distance_threshold` on known training and test"
"dataset."
)
if "classifier" in kwargs:
raise ValueError("Keyword `classifier` in kwargs is not supported.")
if "targeted" in kwargs:
raise ValueError("Keyword `targeted` in kwargs is not supported.")
y = check_and_transform_label_format(y, self.estimator.nb_classes)
hsj = HopSkipJump(classifier=self.estimator, targeted=False, **kwargs)
x_adv = hsj.generate(x=x, y=y)
distance = np.linalg.norm((x_adv - x).reshape((x.shape[0], -1)), ord=2, axis=1)
y_pred = self.estimator.predict(x=x)
distance[np.argmax(y_pred, axis=1) != np.argmax(y, axis=1)] = 0
is_member = np.where(distance > self.distance_threshold_tau, 1, 0)
return is_member
def calibrate_distance_threshold(
self, x_train: np.ndarray, y_train: np.ndarray, x_test: np.ndarray, y_test: np.ndarray, **kwargs
):
"""
Calibrate distance threshold maximising the membership inference accuracy on `x_train` and `x_test`.
:param x_train: Training data.
:param y_train: Labels of training data `x_train`.
:param x_test: Test data.
:param y_test: Labels of test data `x_test`.
:Keyword Arguments for HopSkipJump:
* *norm*: Order of the norm. Possible values: "inf", np.inf or 2.
* *max_iter*: Maximum number of iterations.
* *max_eval*: Maximum number of evaluations for estimating gradient.
* *init_eval*: Initial number of evaluations for estimating gradient.
* *init_size*: Maximum number of trials for initial generation of adversarial examples.
* *verbose*: Show progress bars.
"""
from art.attacks.evasion.hop_skip_jump import HopSkipJump
if "classifier" in kwargs:
raise ValueError("Keyword `classifier` in kwargs is not supported.")
if "targeted" in kwargs:
raise ValueError("Keyword `targeted` in kwargs is not supported.")
y_train = check_and_transform_label_format(y_train, self.estimator.nb_classes)
y_test = check_and_transform_label_format(y_test, self.estimator.nb_classes)
hsj = HopSkipJump(classifier=self.estimator, targeted=False, **kwargs)
x_train_adv = hsj.generate(x=x_train, y=y_train)
x_test_adv = hsj.generate(x=x_test, y=y_test)
distance_train = np.linalg.norm((x_train_adv - x_train).reshape((x_train.shape[0], -1)), ord=2, axis=1)
distance_test = np.linalg.norm((x_test_adv - x_test).reshape((x_test.shape[0], -1)), ord=2, axis=1)
y_train_pred = self.estimator.predict(x=x_train)
y_test_pred = self.estimator.predict(x=x_test)
distance_train[np.argmax(y_train_pred, axis=1) != np.argmax(y_train, axis=1)] = 0
distance_test[np.argmax(y_test_pred, axis=1) != np.argmax(y_test, axis=1)] = 0
num_increments = 100
tau_increment = np.amax([np.amax(distance_train), np.amax(distance_test)]) / num_increments
acc_max = 0.0
distance_threshold_tau = 0.0
for i_tau in range(1, num_increments):
is_member_train = np.where(distance_train > i_tau * tau_increment, 1, 0)
is_member_test = np.where(distance_test > i_tau * tau_increment, 1, 0)
acc = (np.sum(is_member_train) + (is_member_test.shape[0] - np.sum(is_member_test))) / (
is_member_train.shape[0] + is_member_test.shape[0]
)
if acc > acc_max:
distance_threshold_tau = i_tau * tau_increment
acc_max = acc
self.distance_threshold_tau = distance_threshold_tau
def _check_params(self) -> None:
if self.distance_threshold_tau is not None and (
not isinstance(self.distance_threshold_tau, (int, float)) or self.distance_threshold_tau <= 0.0
):
raise ValueError("The distance threshold `distance_threshold_tau` needs to be a positive float.")
| [
"beat.buesser@ie.ibm.com"
] | beat.buesser@ie.ibm.com |
3413e4da2bfd468cf595d767feb1d2525b88ca04 | cdfb77f5fb782ed8c731c6789ba154fefb34b830 | /Seção 7/deque.py | 1677e2b381400de0a3e586e8e583e9016cf89942 | [] | no_license | Yuri-Santiago/curso-udemy-python | 7dc83e0ade45e8d959ce12b81098a13617e0a7ca | 2af0ddad01b08f6afd0bfe35648212d4ee49f52b | refs/heads/master | 2023-04-21T07:11:35.594753 | 2021-05-18T05:14:56 | 2021-05-18T05:14:56 | 350,412,085 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | """
Módulo Collections - Deque
Podemos dizer que o Deque é uma lista de alta performance.
"""
from collections import deque
# Criando deques
deq = deque('yuri')
print(deq)
# Adicionando elementos no deque
deq.append('m') # Adciona no final
print(deq)
deq.appendleft('O') # Adiciona no começo
print(deq)
# Remover elementos
print(deq.pop()) # Remove e retorna o último elemento
print(deq)
print(deq.popleft()) # Remove e retorna o primeiro elemento
print(deq)
| [
"yurimateussantiago@gmail.com"
] | yurimateussantiago@gmail.com |
05e7e379498d8da233aebf0da6207fd6cce541c8 | fc9f4e6af9df3d05c507c9e114b956dfc26cd0f0 | /chapters/2023/Qualité logicielle dans les notebooks Jupyter/assets/python-scripts/0002_get_basket_composition_for_date_range.py | f241d486a24f743a1215f5f1f900614880dfe8a5 | [] | no_license | RIMEL-UCA/RIMEL-UCA.github.io | 0f1334bf9ba77a5ef59c63065f2dbe7c00d70f25 | 3009e69eab06c9dc4f6f2b7f866fa0b00f909516 | refs/heads/master | 2023-07-03T16:00:05.606141 | 2023-02-12T14:40:35 | 2023-02-12T14:40:35 | 230,765,683 | 7 | 29 | null | 2023-03-05T22:09:35 | 2019-12-29T15:04:00 | Jupyter Notebook | UTF-8 | Python | false | false | 629 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import datetime as dt
import pandas as pd
from gs_quant.markets.baskets import Basket
from gs_quant.session import Environment, GsSession
# In[ ]:
client = 'CLIENT ID'
secret = 'CLIENT SECRET'
GsSession.use(Environment.PROD, client_id=client, client_secret=secret, scopes=('read_product_data',))
# In[ ]:
basket = Basket.get('GSMBXXXX') # substitute input with any identifier for a basket
# In[ ]:
position_sets = basket.get_position_sets(dt.date(2021, 1, 7), dt.date(2021, 1, 7))
position_sets = pd.concat([position_set.to_frame() for position_set in position_sets])
| [
"vincetl74@gmail.com"
] | vincetl74@gmail.com |
c33a7c77d3a95e5a197628b12e94dbd929b9403d | 4359911a3546134982c10fa2965a85e3eaf244c1 | /test_3d_car_instance.py | 34f7356def88cc6d5a44f77c0ef23b095232db3a | [] | no_license | itsme-ranger/ApolloScape_InstanceSeg | 154614eefbf4965204cfc243f77ea52a8830322f | 816abea8992abdcd54f0fc155620c1b8da41ba2d | refs/heads/master | 2022-04-10T02:48:38.147939 | 2020-02-08T09:36:11 | 2020-02-08T09:36:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,122 | py | """Perform inference on one or more datasets."""
import argparse
import cv2
import os
import pprint
import sys
import matplotlib
#matplotlib.use('Agg')
import torch
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import _init_paths # pylint: disable=unused-import
from core.config import cfg, merge_cfg_from_file, merge_cfg_from_list, assert_and_infer_cfg
from core.test_engine import run_inference
import utils.logging
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
def parse_args():
"""Parse in command line arguments"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
######################## cfg #####################
#parser.add_argument('--cfg', dest='cfg_file', default='./configs/e2e_3d_car_101_FPN_triple_head_non_local_weighted.yaml', help='Config file for training (and optionally testing)')
#parser.add_argument('--cfg', dest='cfg_file', default='./configs/e2e_3d_car_101_FPN_triple_head_non_local.yaml', help='Config file for training (and optionally testing)')
parser.add_argument('--cfg', dest='cfg_file', default='./configs/e2e_3d_car_101_FPN_triple_head.yaml', help='Config file for training (and optionally testing)')
#parser.add_argument('--load_ckpt', default='/media/samsumg_1tb/ApolloScape/ApolloScape_InstanceSeg/e2e_3d_car_101_FPN_triple_head_non_local/Oct03-12-44-22_N606-TITAN32_step/ckpt/model_step55277.pth', help='checkpoint path to load')
#parser.add_argument('--load_ckpt', default='/media/samsumg_1tb/ApolloScape/ApolloScape_InstanceSeg/e2e_3d_car_101_FPN_triple_head_non_local/Oct03-12-44-22_N606-TITAN32_step/ckpt/model_step55277.pth', help='checkpoint path to load')
parser.add_argument('--load_ckpt', default='/media/samsumg_1tb/ApolloScape/ApolloScape_InstanceSeg/e2e_3d_car_101_FPN_triple_head/Sep09-23-42-21_N606-TITAN32_step/ckpt/model_step56534.pth', help='checkpoint path to load')
######################## ckpt #####################
parser.add_argument('--dataset', dest='dataset', default='ApolloScape', help='Dataset to use')
parser.add_argument('--dataset_dir', default='/media/samsumg_1tb/ApolloScape/ECCV2018_apollo/train/')
parser.add_argument('--load_detectron', help='path to the detectron weight pickle file')
parser.add_argument('--output_dir', help='output directory to save the testing results. If not provided defaults to [args.load_ckpt|args.load_detectron]/../test.')
parser.add_argument('--set', dest='set_cfgs', help='set config keys, will overwrite config in the cfg_file. See lib/core/config.py for all options', default=[], nargs='*')
parser.add_argument('--multi-gpu-testing', help='using multiple gpus for inference', default=False, action='store_true')
parser.add_argument('--vis', default=False, dest='vis', help='visualize detections', action='store_true')
parser.add_argument('--list_flag', default='val', help='Choosing between [val, test]')
parser.add_argument('--iou_ignore_threshold', default=0.5, help='Filter out by this iou')
return parser.parse_args()
if __name__ == '__main__':
if not torch.cuda.is_available():
sys.exit("Need a CUDA device to run the code.")
logger = utils.logging.setup_logging(__name__)
args = parse_args()
logger.info('Called with args:')
logger.info(args)
assert (torch.cuda.device_count() == 1) ^ bool(args.multi_gpu_testing)
assert bool(args.load_ckpt) ^ bool(args.load_detectron), 'Exactly one of --load_ckpt and --load_detectron should be specified.'
if args.output_dir is None:
ckpt_path = args.load_ckpt if args.load_ckpt else args.load_detectron
args.output_dir = os.path.join(os.path.dirname(os.path.dirname(ckpt_path)), 'test')
logger.info('Automatically set output directory to %s', args.output_dir)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
cfg.VIS = args.vis
if args.cfg_file is not None:
merge_cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
merge_cfg_from_list(args.set_cfgs)
# Manually change the following:
cfg.TEST.DATASETS = ['Car3D',]
cfg.MODEL.NUM_CLASSES = 8
cfg.MODEL.NUMBER_CARS = 34
assert_and_infer_cfg()
logger.info('Testing with config:')
logger.info(pprint.pformat(cfg))
# For test_engine.multi_gpu_test_net_on_dataset
args.test_net_file, _ = os.path.splitext(__file__)
# manually set args.cuda
args.cuda = True
# Wudi hard coded the following range
if args.list_flag == 'test':
#args.range = [0, 1041]
i = 1
args.range = [i*125, (i+1)*125]
#args.range = [1000, 1041]
elif args.list_flag == 'val':
# args.range = [0, 206]
i = 3
args.range = [i*50, (i+1)*50]
args.range = [0, 206]
elif args.list_flag == 'train':
args.range = [4, 3888]
run_inference(
args,
ind_range=args.range,
multi_gpu_testing=args.multi_gpu_testing,
check_expected_results=True)
| [
"stevenwudi@gmail.com"
] | stevenwudi@gmail.com |
d185dff0848d40badb6664ead738792964b15ce0 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /pzQXHMqizBmaLDCHc_17.py | 92b7a0873f57358e73a7f1a469c02827953bbc1a | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py |
def calculate_damage(your_type, opponent_type, attack, defense):
effectiveness=1
d = {('fire','grass'):2,('fire','water'):0.5,('fire','electric'):1,('water', 'grass'):0.5,
('water', 'electric'):0.5,('grass', 'electric'):1, ('grass','fire'):0.5,("grass", "water"):2}
for i in d:
if i == (your_type,opponent_type):
effectiveness=d[(your_type,opponent_type)]
damage = 50 * (attack / defense) * effectiveness
return damage
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9c2abe985a3afab65e9881bb794b7c361855e052 | 91b2fb1fb6df216f2e365c3366bab66a567fc70d | /Week09/每日一题/61. 旋转链表.py | 5edbeab4480fb64151aaafd831aeb3035a12ede2 | [] | no_license | hrz123/algorithm010 | d17aee642f03f607a7984beb099eec18f2de1c8e | 817911d4282d2e226518b3533dff28282a91b3d4 | refs/heads/master | 2022-12-20T14:09:26.365781 | 2020-10-11T04:15:57 | 2020-10-11T04:15:57 | 270,178,423 | 1 | 0 | null | 2020-06-07T03:21:09 | 2020-06-07T03:21:09 | null | UTF-8 | Python | false | false | 4,281 | py | # 61. 旋转链表.py
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if not head:
return head
size = self.get_size(head)
k %= size
dummy = ListNode(0)
dummy.next = head
new_tail = dummy
for _ in range(size - k):
new_tail = new_tail.next
tail = new_tail
while tail.next:
tail = tail.next
tail.next = dummy.next
dummy.next = new_tail.next
new_tail.next = None
return dummy.next
def get_size(self, head):
s = 0
while head:
s += 1
head = head.next
return s
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if not head:
return head
size = self.get_size(head)
dummy = ListNode(0)
dummy.next = head
k %= size
pre = dummy
for _ in range(size - k):
pre = pre.next
tail = pre
while tail.next:
tail = tail.next
tail.next = dummy.next
dummy.next = pre.next
pre.next = None
return dummy.next
def get_size(self, head):
c = 0
while head:
c += 1
head = head.next
return c
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if not head or not head.next:
return head
h, size = head, 1
while h.next:
h = h.next
size += 1
h.next = head
k %= size
pre, cur = h, h.next
for _ in range(size - k):
pre, cur = cur, cur.next
pre.next = None
return cur
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if not head or not head.next:
return head
h, size = head, 1
while h.next:
h, size = h.next, size + 1
h.next = head
k %= size
pre, cur = h, head
for _ in range(size - k):
pre, cur = cur, cur.next
pre.next = None
return cur
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if not head or not head.next:
return head
h, size = head, 1
while h.next:
h, size = h.next, size + 1
h.next = head
k %= size
pre, cur = h, head
for _ in range(size - k):
pre, cur = cur, cur.next
pre.next = None
return cur
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if not head or not head.next:
return head
h, size = head, 1
while h.next:
h, size = h.next, size + 1
h.next = head
k %= size
pre, cur = h, head
for _ in range(size - k):
pre, cur = cur, cur.next
pre.next = None
return cur
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if not head or not head.next:
return head
h, size = head, 1
while h.next:
h, size = h.next, size + 1
h.next = head
k %= size
pre, cur = h, head
for _ in range(size - k):
pre, cur = cur, cur.next
pre.next = None
return cur
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if not head or not head.next:
return head
h, size = head, 1
while h.next:
h, size = h.next, size + 1
h.next = head
k %= size
pre, cur = h, head
for _ in range(size - k):
pre, cur = cur, cur.next
pre.next = None
return cur
def main():
sol = Solution()
a = ListNode(1)
a.next = ListNode(2)
a.next.next = ListNode(3)
a.next.next.next = ListNode(4)
a.next.next.next.next = ListNode(5)
b = sol.rotateRight(a, 2)
def print_all(a):
while a:
print(a.val, end="->")
a = a.next
print_all(b)
if __name__ == '__main__':
main()
| [
"2403076194@qq.com"
] | 2403076194@qq.com |
fa19bb1aae904e95f252db7303fe8a7bf7953dce | fd882ae9ceab15868b102328ec33e9d3dbe73cb4 | /devil/devil/android/sdk/adb_compatibility_devicetest.py | d4e63ade953b10753d42d875a72bf7d2d58d2169 | [
"BSD-3-Clause"
] | permissive | rohitrayachoti/catapult | 9a904e4120dabdc61643897610ad894b06faa52b | cd2eebd327e35c839149f7a4d888b046d628df12 | refs/heads/master | 2022-03-16T11:47:18.234529 | 2020-10-09T20:10:31 | 2020-10-09T21:41:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,676 | py | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import os
import posixpath
import random
import signal
import sys
import unittest
if sys.version_info.major >= 3:
basestring = str # pylint: disable=redefined-builtin
_CATAPULT_BASE_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
sys.path.append(os.path.join(_CATAPULT_BASE_DIR, 'devil'))
from devil import devil_env
from devil.android import device_errors
from devil.android import device_test_case
from devil.android.sdk import adb_wrapper
from devil.utils import cmd_helper
from devil.utils import timeout_retry
_TEST_DATA_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'test', 'data'))
def _hostAdbPids():
ps_status, ps_output = cmd_helper.GetCmdStatusAndOutput(
['pgrep', '-l', 'adb'])
if ps_status != 0:
return []
pids_and_names = (line.split() for line in ps_output.splitlines())
return [int(pid) for pid, name in pids_and_names if name == 'adb']
class AdbCompatibilityTest(device_test_case.DeviceTestCase):
@classmethod
def setUpClass(cls):
custom_adb_path = os.environ.get('ADB_PATH')
custom_deps = {
'config_type': 'BaseConfig',
'dependencies': {},
}
if custom_adb_path:
custom_deps['dependencies']['adb'] = {
'file_info': {
devil_env.GetPlatform(): {
'local_paths': [custom_adb_path],
},
},
}
devil_env.config.Initialize(configs=[custom_deps])
def testStartServer(self):
# Manually kill off any instances of adb.
adb_pids = _hostAdbPids()
for p in adb_pids:
os.kill(p, signal.SIGKILL)
self.assertIsNotNone(
timeout_retry.WaitFor(
lambda: not _hostAdbPids(), wait_period=0.1, max_tries=10))
# start the adb server
start_server_status, _ = cmd_helper.GetCmdStatusAndOutput(
[adb_wrapper.AdbWrapper.GetAdbPath(), 'start-server'])
# verify that the server is now online
self.assertEquals(0, start_server_status)
self.assertIsNotNone(
timeout_retry.WaitFor(
lambda: bool(_hostAdbPids()), wait_period=0.1, max_tries=10))
def testKillServer(self):
adb_pids = _hostAdbPids()
if not adb_pids:
adb_wrapper.AdbWrapper.StartServer()
adb_pids = _hostAdbPids()
self.assertGreaterEqual(len(adb_pids), 1)
kill_server_status, _ = cmd_helper.GetCmdStatusAndOutput(
[adb_wrapper.AdbWrapper.GetAdbPath(), 'kill-server'])
self.assertEqual(0, kill_server_status)
adb_pids = _hostAdbPids()
self.assertEqual(0, len(adb_pids))
def testDevices(self):
devices = adb_wrapper.AdbWrapper.Devices()
self.assertNotEqual(0, len(devices), 'No devices found.')
def getTestInstance(self):
"""Creates a real AdbWrapper instance for testing."""
return adb_wrapper.AdbWrapper(self.serial)
def testShell(self):
under_test = self.getTestInstance()
shell_ls_result = under_test.Shell('ls')
self.assertIsInstance(shell_ls_result, basestring)
self.assertTrue(bool(shell_ls_result))
def testShell_failed(self):
under_test = self.getTestInstance()
with self.assertRaises(device_errors.AdbShellCommandFailedError):
under_test.Shell('ls /foo/bar/baz')
def testShell_externalStorageDefined(self):
under_test = self.getTestInstance()
external_storage = under_test.Shell('echo $EXTERNAL_STORAGE')
self.assertIsInstance(external_storage, basestring)
self.assertTrue(posixpath.isabs(external_storage))
@contextlib.contextmanager
def getTestPushDestination(self, under_test):
"""Creates a temporary directory suitable for pushing to."""
external_storage = under_test.Shell('echo $EXTERNAL_STORAGE').strip()
if not external_storage:
self.skipTest('External storage not available.')
while True:
random_hex = hex(random.randint(0, 2**52))[2:]
name = 'tmp_push_test%s' % random_hex
path = posixpath.join(external_storage, name)
try:
under_test.Shell('ls %s' % path)
except device_errors.AdbShellCommandFailedError:
break
under_test.Shell('mkdir %s' % path)
try:
yield path
finally:
under_test.Shell('rm -rf %s' % path)
def testPush_fileToFile(self):
under_test = self.getTestInstance()
with self.getTestPushDestination(under_test) as push_target_directory:
src = os.path.join(_TEST_DATA_DIR, 'push_file.txt')
dest = posixpath.join(push_target_directory, 'push_file.txt')
with self.assertRaises(device_errors.AdbShellCommandFailedError):
under_test.Shell('ls %s' % dest)
under_test.Push(src, dest)
self.assertEquals(dest, under_test.Shell('ls %s' % dest).strip())
def testPush_fileToDirectory(self):
under_test = self.getTestInstance()
with self.getTestPushDestination(under_test) as push_target_directory:
src = os.path.join(_TEST_DATA_DIR, 'push_file.txt')
dest = push_target_directory
resulting_file = posixpath.join(dest, 'push_file.txt')
with self.assertRaises(device_errors.AdbShellCommandFailedError):
under_test.Shell('ls %s' % resulting_file)
under_test.Push(src, dest)
self.assertEquals(resulting_file,
under_test.Shell('ls %s' % resulting_file).strip())
def testPush_directoryToDirectory(self):
under_test = self.getTestInstance()
with self.getTestPushDestination(under_test) as push_target_directory:
src = os.path.join(_TEST_DATA_DIR, 'push_directory')
dest = posixpath.join(push_target_directory, 'push_directory')
with self.assertRaises(device_errors.AdbShellCommandFailedError):
under_test.Shell('ls %s' % dest)
under_test.Push(src, dest)
self.assertEquals(
sorted(os.listdir(src)),
sorted(under_test.Shell('ls %s' % dest).strip().split()))
def testPush_directoryToExistingDirectory(self):
under_test = self.getTestInstance()
with self.getTestPushDestination(under_test) as push_target_directory:
src = os.path.join(_TEST_DATA_DIR, 'push_directory')
dest = push_target_directory
resulting_directory = posixpath.join(dest, 'push_directory')
with self.assertRaises(device_errors.AdbShellCommandFailedError):
under_test.Shell('ls %s' % resulting_directory)
under_test.Shell('mkdir %s' % resulting_directory)
under_test.Push(src, dest)
self.assertEquals(
sorted(os.listdir(src)),
sorted(under_test.Shell('ls %s' % resulting_directory).split()))
# TODO(jbudorick): Implement tests for the following:
# taskset -c
# devices [-l]
# pull
# shell
# ls
# logcat [-c] [-d] [-v] [-b]
# forward [--remove] [--list]
# jdwp
# install [-l] [-r] [-s] [-d]
# install-multiple [-l] [-r] [-s] [-d] [-p]
# uninstall [-k]
# backup -f [-apk] [-shared] [-nosystem] [-all]
# restore
# wait-for-device
# get-state (BROKEN IN THE M SDK)
# get-devpath
# remount
# reboot
# reboot-bootloader
# root
# emu
@classmethod
def tearDownClass(cls):
print
print
print 'tested %s' % adb_wrapper.AdbWrapper.GetAdbPath()
print ' %s' % adb_wrapper.AdbWrapper.Version()
print 'connected devices:'
try:
for d in adb_wrapper.AdbWrapper.Devices():
print ' %s' % d
except device_errors.AdbCommandFailedError:
print ' <failed to list devices>'
raise
finally:
print
if __name__ == '__main__':
sys.exit(unittest.main())
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
0e5570449a1fc4e0ffc94af74051560a30a4798b | 2d0d39b7c066d6f98199e5968dfe2ad3f078eb4a | /Python3/Dictionaries/pop_popitems_update.py | 37b39a2443fcda48fefc11e58ea3e32b96916f87 | [
"MIT"
] | permissive | norbertosanchezdichi/TIL | a232b8648eb41cfb6d74ed6f09affba94c7d6bbb | 45304c1896725fb8ffbe957f4da5f9a377f7ad62 | refs/heads/master | 2023-05-26T20:04:50.146277 | 2023-05-20T17:10:44 | 2023-05-20T17:10:44 | 222,038,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | student = {
'name': 'Norberto',
'owns_parrot' : True,
'favorite_language': 'Python',
25 : 'my favorite number!'}
print(f'{student =}')
print(f'{student.pop(25) =}')
print(f'{student =}')
print(f'{student.popitem() =}')
print(f'{student.popitem() =}')
print(f'{student =}')
person = {'city': 'Los Angeles'}
print(f'{person =}')
person.update(student)
print(f'{person =}')
person['name'] = 'Otrebron'
print(f'{person =}')
person.update({})
print(f'{person =}') | [
"norbertosanchezdichi@users.noreply.github.com"
] | norbertosanchezdichi@users.noreply.github.com |
e5ef6e99151307ff308b5f59eb0e82f785a86ec7 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/sympy/2015/12/authors_update.py | be1a714c8269f40c37c1f3444979dfcf925bc2b3 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 2,770 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A tool generate AUTHORS. We started tracking authors before moving to git, so
we have to do some manual rearrangement of the git history authors in order to
get the order in AUTHORS.
"""
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
from fabric.api import local, env
from fabric.colors import yellow, blue, green, red
from fabric.utils import error
# Locate the sympy checkout relative to this script so the in-tree package
# is importable even when sympy is not installed (Python 2 script: uses
# ``unicode`` below).
mailmap_update_path = os.path.abspath(__file__)
mailmap_update_dir = os.path.dirname(mailmap_update_path)
sympy_top = os.path.split(mailmap_update_dir)[0]
sympy_dir = os.path.join(sympy_top, 'sympy')
if os.path.isdir(sympy_dir):
    sys.path.insert(0, sympy_top)
from sympy.utilities.misc import filldedent
try:
    # Only works in newer versions of fabric
    env.colorize_errors = True
except AttributeError:
    pass
# List every "Name <email>" author in order of first commit; the awk filter
# drops duplicates while keeping the first occurrence of each author.
git_command = """git log --topo-order --reverse --format="%aN <%aE>" | awk ' !x[$0]++'"""
git_people = unicode(local(git_command, capture=True), 'utf-8').strip().split("\n")
from distutils.version import LooseVersion
git_ver = local('git --version', capture=True)[12:]
if LooseVersion(git_ver) < LooseVersion('1.8.4.2'):
    print(yellow("Please use a newer git version >= 1.8.4.2"))
def move(l, i1, i2):
    """Relocate the element at index *i1* to index *i2*, in place."""
    element = l.pop(i1)
    l.insert(i2, element)
# Do the few changes necessary in order to reproduce AUTHORS:
# (authors tracked before the git migration must be re-ordered by hand;
# "*" entries are people absent from the git metadata entirely)
move(git_people, 2, 0) # Ondřej Čertík
move(git_people, 42, 1) # Fabian Pedregosa
move(git_people, 22, 2) # Jurjen N.E. Bos
git_people.insert(4, "*Marc-Etienne M.Leveille <protonyc@gmail.com>")
move(git_people, 10, 5) # Brian Jorgensen
git_people.insert(11, "*Ulrich Hecht <ulrich.hecht@gmail.com>")
git_people.pop(12) # Kirill Smelkov
move(git_people, 12, 32) # Sebastian Krämer
git_people.insert(35, "*Case Van Horsen <casevh@gmail.com>")
git_people.insert(43, "*Dan <coolg49964@gmail.com>")
move(git_people, 57, 59) # Aaron Meurer
move(git_people, 58, 57) # Andrew Docherty
move(git_people, 67, 66) # Chris Smith
move(git_people, 79, 76) # Kevin Goodsell
git_people.insert(84, "*Chu-Ching Huang <cchuang@mail.cgu.edu.tw>")
move(git_people, 93, 92) # James Pearson
git_people.pop(226) # Sergey B Kirpichev
header = """\
All people who contributed to SymPy by sending at least a patch or more (in the
order of the date of their first contribution), except those who explicitly
didn't want to be mentioned. People with a * next to their names are not found
in the metadata of the git history. This file is generated automatically by
running `./bin/authors_update.py`.
"""
# Rewrite the AUTHORS file at the repository root (one directory up).
fd = open(os.path.realpath(os.path.join(__file__, os.path.pardir,
    os.path.pardir, "AUTHORS")), "w")
fd.write(header)
fd.write("\n")
fd.write("\n".join(git_people).encode("utf8"))
fd.write("\n")
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
3f9e25b9467dc1ba529923a9c89c66a19ee6aacd | 19cf0afe2ee84711337a661630974c74dd29d946 | /CLOVER/nature2017/mcs_energy.py | 314bfeee39f64e5f79f591c488f20ac33ab1abe2 | [] | no_license | zhpfu/proj_CEH | b253bfe9334a372af7d9de7ba21cb57e52b4f370 | b4be27bdf1e4452baff276014da014b7ff89fddc | refs/heads/master | 2022-12-26T11:27:48.126308 | 2020-10-02T19:06:36 | 2020-10-02T19:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py |
from decimal import Decimal

# Back-of-the-envelope estimate of the latent-heat power released by a
# mesoscale convective storm, from its accumulated rainfall.
lv = 2.26*1e6 # energy of evaporation in J / kg of water
seconds_day = (60*60*24)
storm_acc_rain = 19 # mm / storm on average or kg / m2
wetted_area = 100000*1e6 # storm area in square metres 12 m/s over 24 hours
# 1 mm rain over 1 m2 weighs 1 kg, so rain depth * lv is energy per m2.
storm_per_m2 = storm_acc_rain * lv # J / m2 = W s / m2
print('%.2E' % Decimal(storm_per_m2), 'J/m2')
lifetime = storm_per_m2 / seconds_day # W / m2
print('for every m2', lifetime) # W / m2
watt = lifetime*wetted_area
print('24h storm with 100000km2 wet', '%.2E' % Decimal(watt), 'Watt')
print('Watt hours', '%.2E' % Decimal(watt*24))
"cornelia.klein@gmx.de"
] | cornelia.klein@gmx.de |
9b6dba52e389e6fefc316b128ba47280ee641249 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5648941810974720_1/Python/iakl/solution_large.py | d656ecb440e5e40876b6bfd9698fe80d36b9c7e0 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | input_file = "A-large.in"
output_file = "A-large.out"
in_f = open(input_file)
# T = number of test cases (first non-empty line); tcs = the case lines.
T = -1
N = -1
tcs = []
for l in in_f:
    sl = l.strip()
    if len(sl) == 0:
        continue
    if T == -1:
        # first non-empty line is the test-case count
        T = int(sl)
        continue
    tcs.append(sl)
in_f.close()
out_f = open(output_file, "w")
def OutputTestCase(tcn, x):
    # Emit one result line in the judge's required "Case #N: answer" format
    # to the module-level output file.
    out_f.write("Case #" + str(tcn) + ": " + str(x) + "\n")
def GetWord(d, w, c):
    """Consume *c* copies of word *w*: decrement each letter's count in *d*."""
    for letter in w:
        d[letter] = d[letter] - c
def CheckWord(d, ch, w, dgs, dg):
    """Record one digit *dg* per remaining marker letter *ch* in counter *d*,
    then consume that many copies of the spelled word *w* from *d*."""
    count = d.get(ch, 0)
    if count > 0:
        dgs.extend([dg] * count)
        GetWord(d, w, count)
def SolveTestCase(w):
    """Recover the ascending digit string whose spelled-out words were
    scrambled into *w* (e.g. 'OZONETOWER' -> '012').

    Each step keys on a letter that is unique among the words still
    remaining at that point (Z->ZERO, W->TWO, U->FOUR, ...), so the
    elimination order below must not change.
    """
    counts = {}
    for letter in w:
        counts[letter] = counts.get(letter, 0) + 1
    digits = []
    for marker, word, digit in (('Z', 'ZERO', 0), ('W', 'TWO', 2),
                                ('U', 'FOUR', 4), ('X', 'SIX', 6),
                                ('G', 'EIGHT', 8), ('H', 'THREE', 3),
                                ('S', 'SEVEN', 7), ('O', 'ONE', 1),
                                ('V', 'FIVE', 5), ('I', 'NINE', 9)):
        CheckWord(counts, marker, word, digits, digit)
    digits.sort()
    return "".join(str(digit) for digit in digits)
#print tcs
# Solve each case in order; progress goes to stdout (Python 2 print),
# answers go to the output file.
for i in range(0, T):
    print "Case #" + str(i + 1)
    r = SolveTestCase(tcs[i])
    OutputTestCase(i + 1, r)
out_f.close()
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
d773bda77e43e291d97be37bee13c098710d31cf | 7c551e749064b25af706b9167211050f8c6ad0a9 | /signatures/windows/infostealer_keylogger.py | 53b784f83f339cd94322d7f17b1539b9363316a7 | [] | no_license | dashjuvi/Cuckoo-Sandbox-vbox-win7 | fa382828b4895c5e1ee60b37a840edd395bf1588 | a3a26b539b06db15176deadeae46fc0476e78998 | refs/heads/master | 2020-03-12T08:33:06.231245 | 2019-01-14T23:09:02 | 2019-01-14T23:09:02 | 130,529,882 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | # Copyright (C) 2012 Thomas "stacks" Birn (@stacksth)
# Copyright (C) 2014 Claudio "nex" Guarnieri (@botherder)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class Keylogger(Signature):
    """Flags processes that install a Windows keyboard hook (keylogging)."""
    name = "infostealer_keylogger"
    description = "Creates a windows hook that monitors keyboard input (keylogger)"
    severity = 3
    categories = ["generic"]
    authors = ["Thomas Birn", "nex"]
    minimum = "2.0"

    # Only inspect calls to the hook-installation APIs.
    filter_apinames = "SetWindowsHookExA", "SetWindowsHookExW"

    def on_call(self, call, process):
        # Hook ids 2 and 13 are WH_KEYBOARD / WH_KEYBOARD_LL, the two
        # keystroke-capturing hook types (see Win32 SetWindowsHookEx docs).
        if call["arguments"]["hook_identifier"] in [2, 13]:
            # A zero/empty thread id makes the hook global (all threads),
            # which is the typical keylogger configuration.
            if not call["arguments"]["thread_identifier"]:
                self.mark_call()
                return True
| [
"diegovm14@gmail.com"
] | diegovm14@gmail.com |
0120a0666de2492a42fec06064f920cf942ac669 | 0c6100dc16291986fab157ed0437f9203f306f1b | /2000- 3000/2356.py | 6b7f8786e155e42660feea4e0eb1af9ab1f4caa9 | [] | no_license | Matuiss2/URI-ONLINE | 4c93c139960a55f7cc719d0a3dcd6c6c716d3924 | 6cb20f0cb2a6d750d58b826e97c39c11bf8161d9 | refs/heads/master | 2021-09-17T09:47:16.209402 | 2018-06-30T08:00:14 | 2018-06-30T08:00:14 | 110,856,303 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | while True:
    try:
        seq1 = input()
        seq2 = input()
    except EOFError:
        # no more input pairs on stdin: stop reading
        break
    if seq2 in seq1: # if sequence 2 is contained in sequence 1, it is resistant
        print("Resistente")
    else:
        print("Nao resistente")
| [
"noreply@github.com"
] | Matuiss2.noreply@github.com |
773f7fef36812d17cd02f2a0c2a452e95541bfd7 | 6872caaa6c3bb59995627064ed1ab63df403bdf6 | /eyantra_provider/venv/Lib/site-packages/authlib/jose/rfc7518/_backends/_key_cryptography.py | 066c5da1e86247fa58b95e4ddde79714d1aed8d9 | [
"MIT"
] | permissive | Andreaf2395/OpenID-Provider | 3189780631d9057140e233930ace72e9bfc76e58 | cdedd42cc49e6f03e3b2570c03fb1f4a2c83be34 | refs/heads/Sameeksha_Final_Provider | 2023-08-21T16:05:42.864159 | 2020-06-18T18:47:16 | 2020-06-18T18:47:16 | 273,314,708 | 0 | 0 | MIT | 2020-06-18T18:48:34 | 2020-06-18T18:44:29 | Python | UTF-8 | Python | false | false | 1,580 | py | from cryptography.hazmat.primitives.serialization import (
load_pem_private_key, load_pem_public_key, load_ssh_public_key
)
from cryptography.hazmat.primitives.asymmetric.rsa import (
RSAPrivateKey, RSAPublicKey
)
from cryptography.hazmat.primitives.asymmetric.ec import (
EllipticCurvePrivateKey, EllipticCurvePublicKey
)
from cryptography.hazmat.backends import default_backend
from authlib.common.encoding import to_bytes
class RSAKey(object):
    """Normalize RSA key input (cryptography objects, PEM or SSH text)."""

    def prepare_private_key(self, key):
        """Return an ``RSAPrivateKey``, parsing unencrypted PEM text if needed."""
        if isinstance(key, RSAPrivateKey):
            return key
        return load_pem_private_key(to_bytes(key), password=None,
                                    backend=default_backend())

    def prepare_public_key(self, key):
        """Return an ``RSAPublicKey``, parsing PEM or OpenSSH text if needed."""
        if isinstance(key, RSAPublicKey):
            return key
        raw = to_bytes(key)
        if raw.startswith(b'ssh-rsa'):
            return load_ssh_public_key(raw, backend=default_backend())
        return load_pem_public_key(raw, backend=default_backend())
class ECKey(object):
    """Normalize EC key input (cryptography objects, PEM or SSH text)."""

    def prepare_private_key(self, key):
        """Return an ``EllipticCurvePrivateKey``, parsing unencrypted PEM text if needed."""
        if isinstance(key, EllipticCurvePrivateKey):
            return key
        return load_pem_private_key(to_bytes(key), password=None,
                                    backend=default_backend())

    def prepare_public_key(self, key):
        """Return an ``EllipticCurvePublicKey``, parsing PEM or OpenSSH text if needed."""
        if isinstance(key, EllipticCurvePublicKey):
            return key
        raw = to_bytes(key)
        if raw.startswith(b'ecdsa-sha2-'):
            return load_ssh_public_key(raw, backend=default_backend())
        return load_pem_public_key(raw, backend=default_backend())
| [
"sameekshabhatia6@gmail.com"
] | sameekshabhatia6@gmail.com |
01916b1dc80855030aa378e97495ed5099a7b2a1 | 1c562b288a92dbef9ee76744f73acd334ba56306 | /jaguar/tests/test_room_metadata.py | 38f6ba87fbbf6b36be13158c768b806276852ce4 | [] | no_license | mkhfring/pychat | 862ffaaee01ea5927e94640e19d88d698ed170af | 8d7c4ea8eb35d8216c2f4194b00483995052b8ea | refs/heads/master | 2021-03-13T07:13:55.433143 | 2020-03-11T18:44:26 | 2020-03-11T18:44:26 | 246,652,274 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | from bddrest.authoring import status, response
from jaguar.tests.helpers import AutoDocumentationBDDTest
class TestRoomMetadata(AutoDocumentationBDDTest):
    """BDD test for the METADATA verb on /apiv1/rooms."""

    def test_metadata(self):
        # METADATA should succeed and describe validation metadata for
        # every field; spot-check all documented attributes of 'type'.
        with self.given('Test metadata verb', '/apiv1/rooms', 'METADATA'):
            assert status == 200
            fields = response.json['fields']
            assert fields['type']['maxLength'] is not None
            assert fields['type']['minLength'] is not None
            assert fields['type']['name'] is not None
            assert fields['type']['notNone'] is not None
            assert fields['type']['required'] is not None
            assert fields['type']['watermark'] is not None
            assert fields['type']['example'] is not None
            assert fields['type']['message'] is not None
| [
"khajezade.mohamad@gmail.com"
] | khajezade.mohamad@gmail.com |
9df6392902b582ba186f117b11700d66985b2de9 | ef243d91a1826b490e935fa3f3e6c29c3cc547d0 | /lxml/etree/CommentBase.py | 29cd0fa1b6fe12df733b0e6bce2364a1a113ec15 | [] | no_license | VentiFang/Python_local_module | 6b3d0b22399e817057dfd15d647a14bb1e41980e | c44f55379eca2818b29732c2815480ee755ae3fb | refs/heads/master | 2020-11-29T11:24:54.932967 | 2019-12-25T12:57:14 | 2019-12-25T12:57:14 | 230,101,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | # encoding: utf-8
# module lxml.etree
# from F:\Python\Python36\lib\site-packages\lxml\etree.cp36-win_amd64.pyd
# by generator 1.147
""" The ``lxml.etree`` module implements the extended ElementTree API for XML. """
# imports
import builtins as __builtins__ # <module 'builtins' (built-in)>
from ._Comment import _Comment
class CommentBase(_Comment):
    """
    All custom Comment classes must inherit from this one.
    To create an XML Comment instance, use the ``Comment()`` factory.
    Subclasses *must not* override __init__ or __new__ as it is
    absolutely undefined when these objects will be created or
    destroyed. All persistent state of Comments must be stored in the
    underlying XML. If you really need to initialize the object after
    creation, you can implement an ``_init(self)`` method that will be
    called after object creation.
    """
    # NOTE: this is an IDE helper stub generated from the compiled lxml
    # extension; method bodies are placeholders, not the real implementation.
    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object.  See help(type) for accurate signature. """
        pass

    __pyx_vtable__ = None # (!) real value is '<capsule object NULL at 0x000001F681EE3C00>'
| [
"5149528+ventifang@user.noreply.gitee.com"
] | 5149528+ventifang@user.noreply.gitee.com |
520fb4963fd27b6f09e539e0d5fdf44b99f18a32 | ba1d0f05e2faf2f21f076c90960e436db2930d36 | /src/items/view.py | be96e57742877f7c5343250254dacff23ff56a4a | [
"MIT"
] | permissive | elipavlov/items | 94d553d05bab4e444c1172f96045058da387db64 | c935e3321284af251c3339e72531f26e9dd64802 | refs/heads/master | 2021-01-22T20:18:27.036787 | 2017-03-20T18:18:20 | 2017-03-20T18:18:20 | 85,308,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,752 | py | # coding=utf-8
from sqlite3 import IntegrityError
from flask import json, request, url_for, flash, jsonify, session
from items.exceptions import DataExtractionError
from sqlalchemy import exc
from .config import app
from .models import db, Item
from logging import getLogger
logger = getLogger(__name__)
def _process_error_response(error):
    """Flash *error* and build the standard JSON 400 error response."""
    flash(str(error))
    payload = json.dumps(dict(
        status='fail',
        error_type=type(error).__name__,
        error=str(error)))
    return app.response_class(
        response=payload,
        status=400,
        mimetype='application/json'
    )
@app.route('%sadd' % app.config.get('API_PATH'), methods=['POST'])
def add_item():
    """Create a new Item from a JSON request body.

    Returns ``{"status": "ok"}`` on success, or the standard JSON 400
    error payload (see :func:`_process_error_response`) on bad input.
    """
    response = None
    data = None
    try:
        data = json.loads(request.data)
    except ValueError as e:
        flash('Data decoding error')
        response = _process_error_response(e)
    if data:
        try:
            item = Item(**data)
            db.session.add(item)
            db.session.commit()
            response = jsonify(status='ok')
        except (IntegrityError, exc.IntegrityError) as e:
            logger.warning(str(e))
            # roll back the failed INSERT so the session stays usable
            db.session.rollback()
            response = _process_error_response(
                ValueError('Unpropper input data')
            )
        except (TypeError, DataExtractionError) as e:
            response = _process_error_response(e)
    if response is None:
        # Bug fix: valid JSON that decodes to a falsy value ("null", "{}")
        # previously left ``response`` unbound and raised NameError (HTTP 500).
        response = _process_error_response(ValueError('Empty payload'))
    return response
@app.route('%sitems' % app.config.get('API_PATH'))
def get_items_list():
    """Return all non-expired items as a JSON list."""
    # NOTE(review): the SQL filter keeps rows whose start_time + lifetime
    # (+12 days grace, in seconds) is *before* current_date, which looks
    # inverted for excluding expired items -- confirm intent; the Python-side
    # ``row.expired()`` check below re-filters in any case.
    items = [row.to_response()
             for row in db.session.query(Item)
                 .filter((Item.start_time + Item.days*86400 + 12*86400) < db.func.current_date())
             if not row.expired()]
    response = jsonify(status='ok', items=items)
    return response
@app.route('%sitems/' % app.config.get('API_PATH'), defaults={'path': ''})
@app.route('%sitems/<path:path>' % app.config.get('API_PATH'))
def get_item(path):
    """Return a single item as JSON; expired items are purged and 404'd."""
    def not_found():
        # 404 body kept identical to the original handcrafted response.
        return app.response_class(
            response=json.dumps(dict(status='not found')),
            status=404,
            mimetype='application/json'
        )

    if not path:
        return _process_error_response(ValueError('Wrong requested id'))
    item = db.session.query(Item).filter(Item.id == path).first()
    if not item:
        return not_found()
    if item.expired():
        # lazily delete items that outlived their lifetime
        db.session.delete(item)
        db.session.commit()
        return not_found()
    return jsonify(item.to_response())
| [
"eli.pavlov.vn@gmail.com"
] | eli.pavlov.vn@gmail.com |
bbd765025d00ad5f5576e91fbfc14956a25fa47a | c600f82e32bb1cbe22c6aff42371b1526ecae440 | /src/livestreamer/packages/flashmedia/flv.py | 7671fe0f2ae8ded170b8b509a32fa79bb9459f33 | [
"BSD-2-Clause"
] | permissive | john-peterson/livestreamer | 6445a91cfd6f2002c2184c8c125934a1b5616cea | 5b268a9cf1a157bbd2fecd3e61c110f046680ab1 | refs/heads/master | 2020-12-01T03:02:56.109651 | 2012-12-30T16:14:15 | 2012-12-30T16:14:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | #!/usr/bin/env python
from .tag import Header, Tag
from .compat import is_py2
class FLV(object):
    """Iterator over the tags of an FLV stream read from file object *fd*."""

    def __init__(self, fd=None):
        self.fd = fd
        # The FLV header is consumed from the stream immediately.
        self.header = Header.deserialize(self.fd)

    def __iter__(self):
        return self

    def __next__(self):
        try:
            tag = Tag.deserialize(self.fd)
        except IOError:
            # I/O failure (including end of stream) terminates iteration.
            raise StopIteration

        return tag

    if is_py2:
        # Python 2 iterator protocol expects next() rather than __next__().
        next = __next__
__all__ = ["FLV"]
| [
"chrippa@tanuki.se"
] | chrippa@tanuki.se |
a217ad299a4c52b3e630ca5237cbe36640af382d | 77e22775135dff0de080573c7a6e83ef373fe4cb | /dl/data/base/datasets.py | 20962cd49a0f1a1f01673ec07dc9f00f7f197657 | [
"MIT"
] | permissive | flyingGH/pytorch.dl | bfcd23ddbc7d405cbba7ce15695e0dda75b755fe | d82aa1191c14f328c62de85e391ac6fa1b4c7ee3 | refs/heads/master | 2023-06-01T11:21:15.636450 | 2021-03-31T05:47:31 | 2021-03-31T05:47:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,809 | py | from torch.utils.data import Dataset
import abc, os, cv2
import numpy as np
from pycocotools.coco import COCO
from xml.etree import ElementTree as ET
from .exceptions import _TargetTransformBaseException, MaximumReapplyError
from .._utils import DATA_ROOT, _get_xml_et_value, _check_ins
reapply_in_exception = True
maximum_reapply = 10
class ImageDatasetBase(Dataset):
    """Abstract image dataset: subclasses supply images and raw targets,
    this base applies augmentation/transform pipelines and retries on
    target-transform failures by resampling a random index."""

    def __init__(self, transform=None, target_transform=None, augmentation=None):
        """
        :param transform: instance of transforms
        :param target_transform: instance of target_transforms
        :param augmentation: instance of augmentations
        """
        self.transform = transform
        self.target_transform = target_transform # _contain_ignore(target_transform)
        self.augmentation = augmentation

    @abc.abstractmethod
    def _get_image(self, index):
        """
        :param index: int
        :return:
            rgb image(Tensor)
        """
        raise NotImplementedError('\'_get_image\' must be overridden')

    @abc.abstractmethod
    def _get_target(self, index):
        """
        :param index: int
        :return:
            list of bboxes, list of bboxes' label index, list of flags([difficult, truncated])
        """
        raise NotImplementedError('\'_get_target\' must be overridden')

    def get_imgtarget(self, index, count=0):
        """
        Fetch one (image, targets) pair; on target-transform errors retry
        with a random index up to the module-level ``maximum_reapply`` limit.

        :param index: int
        :param count: int, number of retries already performed
        :return:
            img : rgb image(Tensor or ndarray)
            targets : Tensor or array-like labels
        """
        try:
            img = self._get_image(index)
            targets = self._get_target(index)
            img, targets = self.apply_transform(img, *targets)
            return img, targets
        except _TargetTransformBaseException as e:
            if count == maximum_reapply:
                raise MaximumReapplyError('Maximum Reapplying reached: {}. last error was {}'.format(count, str(e)))
            elif reapply_in_exception:
                # resample a different example rather than failing the batch
                return self.get_imgtarget(np.random.randint(len(self)), count + 1)
            else:
                raise e

    def __getitem__(self, index):
        """
        :param index: int
        :return:
            img : rgb image(Tensor or ndarray)
            targets : Tensor or array-like labels
        """
        return self.get_imgtarget(index)

    def apply_transform(self, img, *targets):
        """
        IMPORTANT: transforms are applied in a fixed order: augmentation,
        then transform (image only), then target_transform (targets only).

        :param img:
        :param targets:
        :return:
            Transformed img, targets, args
        """
        if self.augmentation:
            img, targets = self.augmentation(img, *targets)
        if self.transform:
            img = self.transform(img)
        if self.target_transform:
            targets = self.target_transform(*targets)

        return img, targets

    @abc.abstractmethod
    def __len__(self):
        pass
class COCODatasetMixin:
    """Image-loading mixin for COCO-style datasets.

    Host class must set the attributes below (e.g. in its __init__).
    """
    _coco_dir: str
    _focus: str
    _coco: COCO
    _imageids: list

    def _jpgpath(self, filename):
        """
        :param filename: path containing .jpg
        :return: path of jpg
        """
        return os.path.join(self._coco_dir, 'images', self._focus, filename)

    def _get_image(self, index):
        """
        :param index: int
        :return:
            rgb image(ndarray), float32, shape (H, W, 3)
        """
        """
        self._coco.loadImgs(self._imageids[index]): list of dict, contains;
            license: int
            file_name: str
            coco_url: str
            height: int
            width: int
            date_captured: str
            flickr_url: str
            id: int
        """
        filename = self._coco.loadImgs(self._imageids[index])[0]['file_name']
        img = cv2.imread(self._jpgpath(filename))
        # pytorch's image order is rgb (cv2 reads BGR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img.astype(np.float32)
class VOCDatasetMixin:
    """Image-loading mixin for Pascal-VOC-style datasets.

    Host class must set the attributes below (e.g. in its __init__).
    """
    _voc_dir: str
    _annopaths: list

    def _jpgpath(self, filename):
        """
        :param filename: path containing .jpg
        :return: path of jpg
        """
        return os.path.join(self._voc_dir, 'JPEGImages', filename)

    """
    Detail of contents in voc > https://towardsdatascience.com/coco-data-format-for-object-detection-a4c5eaf518c5
    VOC bounding box (xmin, ymin, xmax, ymax)
    """
    def _get_image(self, index):
        """
        :param index: int
        :return:
            rgb image(ndarray), float32, shape (H, W, 3)
        """
        # image filename comes from the <filename> tag of the annotation XML
        root = ET.parse(self._annopaths[index]).getroot()
        img = cv2.imread(self._jpgpath(_get_xml_et_value(root, 'filename')))
        # pytorch's image order is rgb (cv2 reads BGR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img.astype(np.float32)
"kado_89_ssp_jun@yahoo.co.jp"
] | kado_89_ssp_jun@yahoo.co.jp |
c2f89f6c6b456ec887bd59fcc97d7a64f2a6337c | a58d930f68712bc9635911af646711201bd1634d | /Lib/site-packages/flask_debugtoolbar/panels/sqlalchemy.py | ded1c101a228d80486fb6d8e410af07cb20adbe6 | [
"MIT"
] | permissive | kyle8998/AppGen | 9999d5d895531ab2dd704e54f016d0e12e74e81a | 4cc9e57c85363d41dc39c8a4687c9f6ada103beb | refs/heads/master | 2022-12-01T17:06:02.824557 | 2017-11-20T21:53:23 | 2017-11-20T21:53:23 | 84,370,560 | 2 | 1 | MIT | 2022-11-27T00:26:13 | 2017-03-08T22:05:01 | Python | UTF-8 | Python | false | false | 4,128 | py | try:
from flask_sqlalchemy import get_debug_queries, SQLAlchemy
except ImportError:
sqlalchemy_available = False
get_debug_queries = SQLAlchemy = None
else:
sqlalchemy_available = True
from flask import request, current_app, abort, json_available, g
from flask_debugtoolbar import module
from flask_debugtoolbar.panels import DebugPanel
from flask_debugtoolbar.utils import format_fname, format_sql
import itsdangerous
_ = lambda x: x
def query_signer():
    # Serializer used to sign queries so the replay views (sql_select /
    # sql_explain) only accept statements this panel itself produced.
    return itsdangerous.URLSafeSerializer(current_app.config['SECRET_KEY'],
                                          salt='fdt-sql-query')
def is_select(statement):
    """Return True if *statement* (str or bytes) is a SQL SELECT."""
    normalized = statement.strip().lower()
    if isinstance(statement, bytes):
        return normalized.startswith(b'select')
    return normalized.startswith('select')
def dump_query(statement, params):
    """Sign a (statement, params) pair for later replay; SELECTs only.

    Returns None for non-SELECTs, empty params, or unserializable params.
    """
    if params and is_select(statement):
        try:
            return query_signer().dumps([statement, params])
        except TypeError:
            pass
    return None
def load_query(data):
    """Verify and decode a signed query produced by :func:`dump_query`.

    :param data: the signed payload (callers pass ``request.args['query']``)
    :return: (statement, params) tuple
    :raises: aborts with 406 on a bad signature or a non-SELECT statement
    """
    try:
        # Bug fix: decode the *data* argument instead of ignoring it and
        # re-reading request.args['query'] (the parameter was unused).
        statement, params = query_signer().loads(data)
    except (itsdangerous.BadSignature, TypeError):
        abort(406)

    # Make sure it is a select statement (read-only replay)
    if not is_select(statement):
        abort(406)

    return statement, params
def extension_used():
    # Flask-SQLAlchemy registers itself under this key on init_app().
    return 'sqlalchemy' in current_app.extensions


def recording_enabled():
    # Queries are recorded only in debug mode or when explicitly enabled.
    return (current_app.debug
            or current_app.config.get('SQLALCHEMY_RECORD_QUERIES'))


def is_available():
    # All panel prerequisites: json + flask_sqlalchemy importable, the
    # extension initialised on this app, and query recording turned on.
    return (json_available and sqlalchemy_available
            and extension_used() and recording_enabled())


def get_queries():
    # Empty list when flask_sqlalchemy could not be imported at all.
    if get_debug_queries:
        return get_debug_queries()
    else:
        return []
class SQLAlchemyDebugPanel(DebugPanel):
    """
    Panel that displays the SQLAlchemy queries executed during a request,
    with timings and signed payloads for replay/EXPLAIN.
    """
    name = 'SQLAlchemy'

    @property
    def has_content(self):
        # Shown when there are queries, or to explain why recording is off.
        return bool(get_queries()) or not is_available()

    def process_request(self, request):
        pass

    def process_response(self, request, response):
        pass

    def nav_title(self):
        return _('SQLAlchemy')

    def nav_subtitle(self):
        count = len(get_queries())

        if not count and not is_available():
            return 'Unavailable'

        return '%d %s' % (count, 'query' if count == 1 else 'queries')

    def title(self):
        return _('SQLAlchemy queries')

    def url(self):
        return ''

    def content(self):
        queries = get_queries()

        if not queries and not is_available():
            # Render a diagnostic page listing which prerequisite failed.
            return self.render('panels/sqlalchemy_error.html', {
                'json_available': json_available,
                'sqlalchemy_available': sqlalchemy_available,
                'extension_used': extension_used(),
                'recording_enabled': recording_enabled(),
            })

        data = []

        for query in queries:
            data.append({
                'duration': query.duration,
                'sql': format_sql(query.statement, query.parameters),
                # signed payload enables the sql_select/sql_explain views
                'signed_query': dump_query(query.statement, query.parameters),
                'context_long': query.context,
                'context': format_fname(query.context)
            })

        return self.render('panels/sqlalchemy.html', {'queries': data})
# Panel views
@module.route('/sqlalchemy/sql_select', methods=['GET', 'POST'])
@module.route('/sqlalchemy/sql_explain', methods=['GET', 'POST'],
              defaults=dict(explain=True))
def sql_select(explain=False):
    """Replay (or EXPLAIN) a signed SELECT recorded by the panel.

    The statement comes from a signed payload, so only SELECTs emitted by
    this app's own panel are accepted (load_query aborts otherwise).
    """
    statement, params = load_query(request.args['query'])
    engine = SQLAlchemy().get_engine(current_app)

    if explain:
        # sqlite spells its query-plan statement differently
        if engine.driver == 'pysqlite':
            statement = 'EXPLAIN QUERY PLAN\n%s' % statement
        else:
            statement = 'EXPLAIN\n%s' % statement

    result = engine.execute(statement, params)
    return g.debug_toolbar.render('panels/sqlalchemy_select.html', {
        'result': result.fetchall(),
        'headers': result.keys(),
        'sql': format_sql(statement, params),
        'duration': float(request.args['duration']),
    })
| [
"kylelim8998@gmail.com"
] | kylelim8998@gmail.com |
f06396f839a325246ee0801bec2dd517652b1377 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/76/usersdata/179/39313/submittedfiles/jogoDaVelha.py | dc18ac8faf0b1370501cfc88faf49f897ae00dfb | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | # -*- coding: utf-8 -*-
import math
x1 = int(input('Digite x1: '))
x2 = int(input('Digite x2: '))
x3 = int(input('Digite x3: '))
x4 = int(input('Digite x4: '))
x5 = int(input('Digite x5: '))
x6 = int(input('Digite x6: '))
x7 = int(input('Digite x7: '))
x8 = int(input('Digite x8: '))
x9 = int(input('Digite x9: '))
if x1==x5==x6:
print('E')
elif x2==x4==x7:
print('E')
elif x3==x8==x9:
print('E')
if x1==x2==x3:
print('0')
elif x4==x5==x6:
print('0')
elif x7==x8==x9:
print('0')
elif x1==x5==x9:
print('0')
elif x3==x5==x7:
print('0')
elif x1==x4==x7:
print('0')
elif x2==x5==x8:
print('0')
elif x3==x6==x9:
print('0')
if x1==x2==x3:
print('1')
elif x4==x5==x6:
print('1')
elif x7==x8==x9:
print('1')
elif x1==x5==x9:
print('1')
elif x3==x5==x7:
print('1')
elif x1==x4==x7:
print('1')
elif x2==x5==x8:
print('1')
elif x3==x6==x9:
print('1')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
807298af3e1ebd4145bdd555747f385744339491 | 309dbf12ce8bb5ddb935978628f9a675141cffa5 | /rev-linked-list/reversell.py | 44d54c38073c38ee870b2913c2de6fd2e49990dc | [] | no_license | eljlee/hb-code-challenges | 38cc55df8cbf13f1c516cc315734ea029c6ce08d | 3a190794483003a52ca7fd43349dad6aed252eee | refs/heads/master | 2020-03-08T06:21:14.977981 | 2018-05-01T06:07:06 | 2018-05-01T06:07:06 | 127,969,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,178 | py | """Given linked list head node, return head node of new, reversed linked list.
For example:
>>> ll = Node(1, Node(2, Node(3)))
>>> reverse_linked_list(ll).as_string()
'321'
"""
class Node(object):
    """A node in a singly linked list."""

    def __init__(self, data, next=None):
        self.data = data
        self.next = next

    def as_string(self):
        """Return the data of this node and its successors as one string.

        >>> Node(3).as_string()
        '3'

        >>> Node(3, Node(2, Node(1))).as_string()
        '321'
        """
        parts = []
        current = self
        while current is not None:
            parts.append(str(current.data))
            current = current.next
        return "".join(parts)
def reverse_linked_list(head):
    """Given LL head node, return head node of new, reversed linked list.

    >>> ll = Node(1, Node(2, Node(3)))

    >>> reverse_linked_list(ll).as_string()
    '321'
    """
    reversed_head = None
    current = head
    # Prepend each visited node's data to a freshly built list.
    while current is not None:
        reversed_head = Node(current.data, reversed_head)
        current = current.next
    return reversed_head
if __name__ == '__main__':
    # Run the doctests embedded above (Python 2 print statement below).
    import doctest
    if doctest.testmod().failed == 0:
        print "\n*** ALL TESTS PASSED. RIGHT ON!\n"
| [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
01fbc51fb60705df1d17f7752984ecf95387c70c | 76e6d4f93078327fef8672133fc75a6f12abc240 | /ABC166/Test_C.py | 21670db70c1607836575dd879ec8512eea46c6fc | [] | no_license | adusa1019/atcoder | 1e8f33253f6f80a91d069b2f3b568ce7a2964940 | f7dbdfc021425160a072f4ce4e324953a376133a | refs/heads/master | 2021-08-08T04:41:36.098678 | 2021-02-01T07:34:34 | 2021-02-01T07:34:34 | 89,038,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | import pytest
from C import solve
def test_solve():
assert solve('4 3\n1 2 3 4\n1 3\n2 3\n2 4') == '2'
assert solve('6 5\n8 6 9 1 2 1\n1 3\n4 2\n4 3\n4 6\n4 6') == '3'
| [
"symphony20030829@yahoo.co.jp"
] | symphony20030829@yahoo.co.jp |
b9eb79ac90d988d56bfe2b1946ca1d9a20bc13c4 | 20a18ea0d2738477c5a117f80154c195c6ff2679 | /nova/tests/unit/scheduler/filters/test_vcpu_model_filter.py | 29e35d8e88efc3989c239a0f1635d3a3eb84a908 | [
"Apache-2.0"
] | permissive | hustlzp1981/stx-nova | 1300fa9757a29b2d00ef587c71ebd98171077d10 | c52432b3e7a240817a2de06321a2459f4862ab6a | refs/heads/master | 2020-04-26T03:21:12.797447 | 2019-03-01T17:40:14 | 2019-03-01T17:40:14 | 173,264,343 | 0 | 0 | Apache-2.0 | 2019-03-01T08:28:15 | 2019-03-01T08:28:15 | null | UTF-8 | Python | false | false | 7,430 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2016-2017 Wind River Systems, Inc.
#
from nova import objects
from nova.scheduler.filters import vcpu_model_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestVCPUModelFilter(test.NoDBTestCase):
    def setUp(self):
        super(TestVCPUModelFilter, self).setUp()
        # Filter under test, shared by every case below.
        self.filt_cls = vcpu_model_filter.VCpuModelFilter()

    def test_vcpu_model_not_specified(self):
        # No hw:cpu_model in flavor or image: any host must pass.
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=1024, extra_specs={}),
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
    def test_vcpu_model_flavor_passes(self):
        # Flavor requests Nehalem; host reporting Broadwell must pass.
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Nehalem'}),
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
            {'cpu_info': '{"model": "Broadwell"}'})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_vcpu_model_flavor_fails(self):
        # Flavor requests Nehalem; host reporting Conroe must be rejected.
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Nehalem'}),
            image=objects.ImageMeta(properties=objects.ImageMetaProps()),
            scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
            {'cpu_info': '{"model": "Conroe"}'})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
    def test_vcpu_model_image_passes(self):
        # Same as the flavor pass case, but the model comes from the image.
        props = objects.ImageMetaProps(hw_cpu_model='Nehalem')
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=1024, extra_specs={}),
            image=objects.ImageMeta(properties=props),
            scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
            {'cpu_info': '{"model": "Broadwell"}'})
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_vcpu_model_image_fails(self):
        # Image-requested Nehalem against a Conroe host must be rejected.
        props = objects.ImageMetaProps(hw_cpu_model='Nehalem')
        spec_obj = objects.RequestSpec(
            flavor=objects.Flavor(memory_mb=1024, extra_specs={}),
            image=objects.ImageMeta(properties=props),
            scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
                             'node': ['node1']})
        host = fakes.FakeHostState('host1', 'node1',
            {'cpu_info': '{"model": "Conroe"}'})
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_passthrough_vcpu_model_flavor_passes(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Passthrough'}),
image=objects.ImageMeta(properties=objects.ImageMetaProps()),
scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
'node': ['node1']})
host = fakes.FakeHostState('host1', 'node1',
{'cpu_info': '{"model": "Broadwell", "features": ["vmx"]}'})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_passthrough_migrate_vcpu_model_flavor_passes(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Passthrough'}),
image=objects.ImageMeta(properties=objects.ImageMetaProps()),
scheduler_hints={'task_state': ['migrating'], 'host': ['host1'],
'node': ['node1']})
host = fakes.FakeHostState('host1', 'node1',
{'cpu_info': '{"model": "Broadwell", '
'"features": ["pge", "avx", "vmx"]}'})
self.stub_out('nova.objects.ComputeNode.get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_passthrough_migrate_vcpu_model_flavor_fails(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Passthrough'}),
image=objects.ImageMeta(properties=objects.ImageMetaProps()),
scheduler_hints={'task_state': ['migrating'], 'host': ['host1'],
'node': ['node1']})
host = fakes.FakeHostState('host1', 'node1',
{'cpu_info': '{"model": "IvyBridge", '
'"features": ["pge", "avx", "vmx"]}'})
self.stub_out('nova.objects.ComputeNode.get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_passthrough_migrate_vcpu_model_flavor_features_fails(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Passthrough'}),
image=objects.ImageMeta(properties=objects.ImageMetaProps()),
scheduler_hints={'task_state': ['migrating'], 'host': ['host1'],
'node': ['node1']})
host = fakes.FakeHostState('host1', 'node1',
{'cpu_info': '{"model": "Broadwell", '
'"features": ["pge", "avx", "vmx", "clflush"]}'})
self.stub_out('nova.objects.ComputeNode.get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_passthrough_migrate_vcpu_model_flavor_kvm_fails(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(extra_specs={'hw:cpu_model': 'Passthrough'}),
image=objects.ImageMeta(properties=objects.ImageMetaProps()),
scheduler_hints={'task_state': ['scheduling'], 'host': ['host1'],
'node': ['node1']})
host = fakes.FakeHostState('host1', 'node1',
{'cpu_info': '{"model": "Broadwell", '
'"features": ["pge", "avx"]}'})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def _fake_compute_node_get_by_host_and_nodename(self, cn, ctx, host, node):
cpu_info = '{"model": "Broadwell", "features": ["pge", "avx", "vmx"]}'
compute_node = objects.ComputeNode(cpu_info=cpu_info)
return compute_node
| [
"dtroyer@gmail.com"
] | dtroyer@gmail.com |
dfc6c6bcfb803410e8e29e9372cbfc20346520ac | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_101342.12+260620.0/sdB_SDSSJ_101342.12+260620.0_lc.py | 9a011b1d702d1b617d49f9c4a2d9a77ee7bb3f10 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | from gPhoton.gAperture import gAperture
def main():
    # One gPhoton NUV aperture-photometry run for this sdB target:
    # 30 s time bins, 0.00556 deg aperture, sky annulus 0.0060-0.0104 deg.
    # NOTE(review): the csvfile path contains a space before the final
    # path component -- likely unintended; verify against the output tree.
    gAperture(band="NUV", skypos=[153.4255,26.105556], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_SDSSJ_101342.12+260620.0 /sdB_SDSSJ_101342.12+260620.0_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
# Run the photometry only when executed as a script.
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
6d08ddadf72bb842f38dcab1040eb5c887a7b5ba | e6e81d0cd02223ca27f2c3f544b3c116e7617270 | /LeetCodePremium/77.combinations.py | 4e7d7a1ed0cc1d5b18cdec815c2f06e8eceb101c | [] | no_license | ashjambhulkar/objectoriented | 86166640b0546713095dd5d8804fc78d31782662 | 6f07b50590ceef231be38d6d7b8c73a40c1152e9 | refs/heads/master | 2022-05-03T23:28:38.674275 | 2022-04-26T21:37:31 | 2022-04-26T21:37:31 | 249,091,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | #
# @lc app=leetcode id=77 lang=python3
#
# [77] Combinations
#
# @lc code=start
class Solution:
    def combine(self, n: int, k: int) -> List[List[int]]:
        """Return all k-element combinations of the integers 1..n.

        Classic backtracking: extend a partial combination with each
        remaining candidate and recurse on the suffix, so results come
        out in lexicographic order.

        Changes vs. the previous version: the large commented-out
        duplicate was removed; recursion now stops as soon as a
        combination reaches length k (the old code kept extending past
        k and could never record again -- pure wasted work); and the
        O(n) ``remove`` was replaced by an O(1) ``pop`` (the element
        being undone is always the last one appended).
        """
        candidates = [i for i in range(1, n + 1)]
        result = []

        def backtrack(remaining, partial):
            if len(partial) == k:
                result.append(partial[:])
                return  # extending further can never reach length k again
            for i in range(len(remaining)):
                partial.append(remaining[i])
                backtrack(remaining[i + 1:], partial)
                partial.pop()

        backtrack(candidates, [])
        return result
# @lc code=end
| [
"ashjambhulkar@hotmail.com"
] | ashjambhulkar@hotmail.com |
972ea4c7a9f98ddd846f80c492b024ad39d1440a | 60a7e7dc2ba82c5c74352dc8466a9becba068e2e | /backend2/userapi1/migrations/0001_initial.py | 8c0c9e319f589c09a402c93581e1c8af2e666c7e | [] | no_license | TestTask12/SKD-Django- | d7b445d8afd32fe5aa877c31451b7f2d932d2fe7 | 47bf79071d2781d129794e9b47d396cfd9162d00 | refs/heads/master | 2023-07-06T10:08:04.156998 | 2021-08-02T12:26:04 | 2021-08-02T12:26:04 | 387,738,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,970 | py | # Generated by Django 3.2.5 on 2021-07-23 10:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the project's custom ``User`` model."""
    # First migration of this app.
    initial = True
    # Requires django.contrib.auth for the Group/Permission M2M targets.
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            # Field list mirrors AbstractBaseUser/PermissionsMixin plus
            # app-specific flags (is_verified) and timestamps.
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(db_index=True, max_length=255, unique=True)),
                ('email', models.EmailField(db_index=True, max_length=255, unique=True)),
                ('is_verified', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"sumankumariideal@gmail.com"
] | sumankumariideal@gmail.com |
cd347060e1da3d2391d0143e12892f9eac6c4346 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/storage/azure-mgmt-storage/generated_samples/blob_containers_put_immutability_policy_allow_protected_append_writes_all.py | 959e1d06da38a2e43845d975a656d18959590c16 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 1,700 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.storage import StorageManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-storage
# USAGE
python blob_containers_put_immutability_policy_allow_protected_append_writes_all.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create/update the immutability policy on the sample container."""
    credential = DefaultAzureCredential()
    client = StorageManagementClient(
        credential=credential,
        subscription_id="{subscription-id}",
    )
    policy = client.blob_containers.create_or_update_immutability_policy(
        resource_group_name="res1782",
        account_name="sto7069",
        container_name="container6397",
    )
    print(policy)
# x-ms-original-file: specification/storage/resource-manager/Microsoft.Storage/stable/2022-09-01/examples/BlobContainersPutImmutabilityPolicyAllowProtectedAppendWritesAll.json
# Standard script entry point.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
980c0b0837b2db2ae0a95f0e3aec09938d06100f | a5698f82064aade6af0f1da21f504a9ef8c9ac6e | /huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/cce_job_spec.py | dc9786d83d3d306eea915bbbb9939ea87a5e228b | [
"Apache-2.0"
] | permissive | qizhidong/huaweicloud-sdk-python-v3 | 82a2046fbb7d62810984399abb2ca72b3b47fac6 | 6cdcf1da8b098427e58fc3335a387c14df7776d0 | refs/heads/master | 2023-04-06T02:58:15.175373 | 2021-03-30T10:47:29 | 2021-03-30T10:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,808 | py | # coding: utf-8
import pprint
import re
import six
class CCEJobSpec:
    """Specification (``spec``) section of a CCE job.

    Generated-SDK model: ``openapi_types`` and ``attribute_map`` drive
    the generic serialization helpers (``to_dict`` and friends).

    Changes vs. the previous version: the Chinese docstrings were
    translated to English and the ``six.iteritems`` py2-compat call in
    ``to_dict`` was replaced by plain ``dict.items`` (equivalent on
    Python 3); behavior is otherwise unchanged.
    """

    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    # Python attribute name -> OpenAPI type string.
    openapi_types = {
        'cluster_uid': 'str',
        'extend_param': 'dict(str, str)',
        'resource_id': 'str',
        'resource_name': 'str',
        'sub_jobs': 'list[CCEJob]',
        'type': 'str'
    }

    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        'cluster_uid': 'clusterUID',
        'extend_param': 'extendParam',
        'resource_id': 'resourceID',
        'resource_name': 'resourceName',
        'sub_jobs': 'subJobs',
        'type': 'type'
    }

    def __init__(self, cluster_uid=None, extend_param=None, resource_id=None, resource_name=None, sub_jobs=None, type=None):
        """CCEJobSpec - a model defined in huaweicloud sdk.

        All parameters are optional; unset attributes stay ``None``.
        """
        self._cluster_uid = None
        self._extend_param = None
        self._resource_id = None
        self._resource_name = None
        self._sub_jobs = None
        self._type = None
        self.discriminator = None

        if cluster_uid is not None:
            self.cluster_uid = cluster_uid
        if extend_param is not None:
            self.extend_param = extend_param
        if resource_id is not None:
            self.resource_id = resource_id
        if resource_name is not None:
            self.resource_name = resource_name
        if sub_jobs is not None:
            self.sub_jobs = sub_jobs
        if type is not None:
            self.type = type

    @property
    def cluster_uid(self):
        """str: ID of the cluster the job belongs to."""
        return self._cluster_uid

    @cluster_uid.setter
    def cluster_uid(self, cluster_uid):
        self._cluster_uid = cluster_uid

    @property
    def extend_param(self):
        """dict(str, str): extended parameters."""
        return self._extend_param

    @extend_param.setter
    def extend_param(self, extend_param):
        self._extend_param = extend_param

    @property
    def resource_id(self):
        """str: ID of the resource the job operates on."""
        return self._resource_id

    @resource_id.setter
    def resource_id(self, resource_id):
        self._resource_id = resource_id

    @property
    def resource_name(self):
        """str: name of the resource the job operates on."""
        return self._resource_name

    @resource_name.setter
    def resource_name(self, resource_name):
        self._resource_name = resource_name

    @property
    def sub_jobs(self):
        """list[CCEJob]: details of every sub-job.

        Operations such as cluster or node creation usually consist of
        several sub-jobs; the parent job completes only after all of
        its sub-jobs have finished.
        """
        return self._sub_jobs

    @sub_jobs.setter
    def sub_jobs(self, sub_jobs):
        self._sub_jobs = sub_jobs

    @property
    def type(self):
        """str: job type, e.g. "CreateCluster"."""
        return self._type

    @type.setter
    def type(self, type):
        self._type = type

    def to_dict(self):
        """Return the model's properties as a dict (masking sensitive ones)."""
        result = {}
        # six.iteritems() was dropped: dict.items() is equivalent on
        # Python 3 and removes the py2-compat dependency from this class.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, CCEJobSpec):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
bec2ac58d9e5663e3e2e4ecdcd53faa846e92d59 | e65ae5bd9ae1c93e7117e630f7340bc73aa71212 | /lib/gevent/greentest/test__greenletset.py | ffe4e308e45094f2aa246d25c391c328edcf36aa | [
"MIT"
] | permissive | nadirhamid/oneline | e98ff1ed81da0536f9602ecdde2fb2a4fe80d256 | 833ebef0e26ae8e0cc452756381227746d830b23 | refs/heads/master | 2021-01-21T04:27:41.715047 | 2016-05-30T03:50:34 | 2016-05-30T03:50:34 | 23,320,578 | 1 | 2 | NOASSERTION | 2020-03-12T17:22:24 | 2014-08-25T16:29:36 | Python | UTF-8 | Python | false | false | 3,222 | py | import time
import greentest
import gevent
from gevent import pool
DELAY = 0.1
class SpecialError(Exception):
    """The only exception that makes an Undead greenlet actually stop."""
class Undead(object):
    """Callable that survives ordinary kills.

    Every exception other than SpecialError raised into it is counted
    in ``shot_count``; only SpecialError terminates the loop.
    """

    def __init__(self):
        # How many times this target has been "shot" (killed with
        # anything other than SpecialError).
        self.shot_count = 0

    def __call__(self):
        alive = True
        while alive:
            try:
                gevent.sleep(1)
            except SpecialError:
                alive = False
            except:
                self.shot_count += 1
class Test(greentest.TestCase):
    """Timing-sensitive tests for gevent.pool.Group membership and kill().

    NOTE(review): assertions here depend on real elapsed time (DELAY
    windows), so they assume a lightly loaded machine.
    """
    def test_basic(self):
        # Greenlets leave the group automatically when they finish.
        DELAY = 0.05
        s = pool.Group()
        s.spawn(gevent.sleep, DELAY)
        assert len(s) == 1, s
        s.spawn(gevent.sleep, DELAY * 2.)
        assert len(s) == 2, s
        gevent.sleep(DELAY * 3. / 2.)
        assert len(s) == 1, s
        gevent.sleep(DELAY)
        assert not s, s
    def test_waitall(self):
        # join() blocks until every member finishes (~2*DELAY here).
        s = pool.Group()
        s.spawn(gevent.sleep, DELAY)
        s.spawn(gevent.sleep, DELAY * 2)
        assert len(s) == 2, s
        start = time.time()
        s.join(raise_error=True)
        delta = time.time() - start
        assert not s, s
        assert len(s) == 0, s
        assert DELAY * 1.9 <= delta <= DELAY * 2.5, (delta, DELAY)
    def test_kill_block(self):
        # Blocking kill() empties the group well before DELAY elapses.
        s = pool.Group()
        s.spawn(gevent.sleep, DELAY)
        s.spawn(gevent.sleep, DELAY * 2)
        assert len(s) == 2, s
        start = time.time()
        s.kill()
        assert not s, s
        assert len(s) == 0, s
        delta = time.time() - start
        assert delta < DELAY * 0.8, delta
    def test_kill_noblock(self):
        # Non-blocking kill() only takes effect after a context switch.
        s = pool.Group()
        s.spawn(gevent.sleep, DELAY)
        s.spawn(gevent.sleep, DELAY * 2)
        assert len(s) == 2, s
        s.kill(block=False)
        assert len(s) == 2, s
        gevent.sleep(0.0001)
        assert len(s) == 0, s
        assert not s, s
    def test_kill_fires_once(self):
        # Repeated kill()/killone() must deliver the exception only once
        # per greenlet; Undead counts how many times it was "shot".
        u1 = Undead()
        u2 = Undead()
        p1 = gevent.spawn(u1)
        p2 = gevent.spawn(u2)
        def check(count1, count2):
            # Both greenlets must still be alive with the given counts.
            assert p1, p1
            assert p2, p2
            assert not p1.dead, p1
            assert not p2.dead, p2
            self.assertEqual(u1.shot_count, count1)
            self.assertEqual(u2.shot_count, count2)
        gevent.sleep(0.01)
        s = pool.Group([p1, p2])
        assert len(s) == 2, s
        check(0, 0)
        s.killone(p1, block=False)
        check(0, 0)
        gevent.sleep(0)
        check(1, 0)
        s.killone(p1)
        check(1, 0)
        s.killone(p1)
        check(1, 0)
        s.kill(block=False)
        s.kill(block=False)
        s.kill(block=False)
        check(1, 0)
        gevent.sleep(DELAY)
        check(1, 1)
        # Undead ignores GreenletExit, so a blocking kill must time out.
        X = object()
        kill_result = gevent.with_timeout(DELAY, s.kill, block=True, timeout_value=X)
        assert kill_result is X, repr(kill_result)
        assert len(s) == 2, s
        check(1, 1)
        # SpecialError is the one exception Undead honors: clean shutdown.
        p1.kill(SpecialError)
        p2.kill(SpecialError)
    def test_killall_subclass(self):
        # kill() must also work when members are Greenlet subclasses.
        p1 = GreenletSubclass.spawn(lambda: 1 / 0)
        p2 = GreenletSubclass.spawn(lambda: gevent.sleep(10))
        s = pool.Group([p1, p2])
        s.kill()
class GreenletSubclass(gevent.Greenlet):
    """Plain Greenlet subclass used by test_killall_subclass."""
# Allow running this test module directly.
if __name__ == '__main__':
    greentest.main()
| [
"matrix.nad@gmail.com"
] | matrix.nad@gmail.com |
26a281cbfa087655cdab4c27c6fd0c9a2db3e8f2 | 77da9a5aac6598df2dea6088e068e973e3da52ad | /benchmarks/1d16pu/config.py | b5869320a3d278e60c3593dff0db31f503088bc8 | [] | no_license | GiggleLiu/projectx | e9e5960d5892c4efcad0a0a7cc5e7ff72ca50458 | 170f990939549949de203004c03ed68762ba23b4 | refs/heads/master | 2021-09-08T17:28:22.473021 | 2018-03-11T10:32:32 | 2018-03-11T10:32:32 | 109,708,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | import numpy as np
from models.wanglei6 import WangLei6
powerlist_list = [
[[1,0,1],[1,1]],
[[1,0,1],[1,1]],
[[1,0,1],[1,1]],
[[1,0,1],[1,1]],
[[1,0,1],[1,1]],
[[1,0,1],[1,1]],
]
num_features_list = [
[20], [20], [20], [20], [20],
[20], [20], [20], [20], [20],
]
nonlinear_list_list = [
['sinh','none','none'],
['sinh','none','none'],
]
def modifyconfig_and_getnn(config, bench_id):
    """Build the WangLei6 network for benchmark ``bench_id``.

    May mutate ``config`` (sets the reference energy EG when the run
    is not at zero momentum's ground state -- see below).
    """
    # Fixed hyperparameters shared by all benchmarks.
    is_unitary = False
    momentum = 0
    eta0 = 0.2
    eta1 = 0.2
    NP = 0
    NC = 1
    itype = 'complex128'
    poly_order = 10
    usesum = False
    # Benchmark-specific settings (see the module-level tables).
    powerlist = powerlist_list[bench_id]
    num_features = num_features_list[bench_id]
    nonlinear_list = nonlinear_list_list[bench_id]
    soften_gradient = False
    if bench_id == 1:
        momentum = np.pi
    if momentum == 0:  # not ground state
        # Pin the exact reference energy for the zero-momentum sector.
        config['hamiltonian']['EG'] = -6.6889395
    rbm = WangLei6(input_shape=tuple(config['hamiltonian']['size']), num_features=num_features,
                   itype=itype, dtype0=itype, dtype1=itype, powerlist=powerlist,
                   usesum=usesum, nonlinear_list=nonlinear_list, poly_order=poly_order, do_BN=False,
                   momentum=momentum, eta0=eta0, eta1=eta1, NP=NP, NC=NC, is_unitary=is_unitary,
                   soften_gradient=soften_gradient)
    return rbm
| [
"cacate0129@gmail.com"
] | cacate0129@gmail.com |
33cfc768934e8033b6bdcf3758e69a8c15adb0b0 | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/opcodes/cases/test_map_mem_nat_100.py | 68ed860f3164c948df0a2f3f9efad4284fc8d31b | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 886 | py | from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestmap_mem_nat_100(TestCase):
    """Replays the map_mem_nat contract and checks the resulting storage."""

    def setUp(self):
        self.maxDiff = None  # show full diffs on assertion failure
        self.i = Interpreter(debug=True)

    def test_opcode_map_mem_nat_100(self):
        contract = abspath("opcodes/contracts/map_mem_nat.tz")
        loaded = self.i.execute(f'INCLUDE "{contract}"')
        self.assertTrue(loaded['success'])
        run = self.i.execute('RUN 1 (Pair { Elt 1 0 } None)')
        self.assertTrue(run['success'])
        storage = run['result']['storage']
        expected = parse_expression(
            michelson_to_micheline('(Pair { Elt 1 0 } (Some True))'),
            storage.type_expr)
        self.assertEqual(expected, storage._val)
| [
"mz@baking-bad.org"
] | mz@baking-bad.org |
eebf77058bb72e39adc7e144c70efde6975f3eb4 | c380e7c61c97cb03531e4f33fe31c99c7593c70b | /contrib/splunk-sdk-python/examples/explorer/explorer.py | ae4f6d06e2597a0764a43d3e6d0bccc93b209569 | [
"Apache-2.0"
] | permissive | skada/splunk-appframework | 09bd777bc7c28b6b460eb8b7397288b559d9bf3d | 8d07f81a9c37b8a0a2b9432bdd6fd78e98e458cc | refs/heads/master | 2020-12-01T01:17:03.481885 | 2013-03-09T05:51:27 | 2013-03-09T05:51:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,029 | py | #!/usr/bin/env python
#
# Copyright 2011-2012 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import server
import webbrowser
import sys
import os
sys.path.insert(0, '../../') # Use splunklib and utils without installing
import utils
import urllib
PORT = 8080
def main(argv):
    """Open explorer.html in a browser pre-loaded with the Splunk
    connection parameters, then serve the static files.

    Options come from the command line and the user's .splunkrc.
    """
    usage = "usage: %prog [options]"
    # Extra rule understood by utils.parse() on top of the standard
    # .splunkrc options.
    redirect_port_args = {
        "redirectport": {
            "flags": ["--redirectport"],
            "default": PORT,
            "help": "Port to use for redirect server (default: %s)" % PORT,
        },
    }
    opts = utils.parse(argv, redirect_port_args, ".splunkrc", usage=usage)

    args = [("scheme", opts.kwargs["scheme"]),
            ("host", opts.kwargs["host"]),
            ("port", opts.kwargs["port"]),
            ("redirecthost", "localhost"),
            ("redirectport", opts.kwargs["redirectport"]),
            ("username", opts.kwargs["username"]),
            ("password", opts.kwargs["password"])]

    # Membership test on the dict directly; the old ``in d.keys()``
    # built a throwaway key list for no benefit.
    if 'app' in opts.kwargs:
        args.append(('app', opts.kwargs['app']))
    if 'owner' in opts.kwargs:
        args.append(('owner', opts.kwargs['owner']))

    # Encode the query-string arguments
    args = urllib.urlencode(args)

    # Launch the browser
    webbrowser.open("file://%s" % os.path.join(os.getcwd(), "explorer.html?%s" % args))

    # And serve the files
    server.serve(opts.kwargs["redirectport"])
if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C. (The old trailing "except: raise"
        # clause was a no-op and has been removed; every other
        # exception still propagates.)
        pass
| [
"itay@neeman.net"
] | itay@neeman.net |
973003a6f81d1d8e405f3dfccf3199500bca22db | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_0833+211/sdB_PG_0833+211_lc.py | 04021942ce6aaaa0c17094607e8f65d9c8e1e284 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from gPhoton.gAperture import gAperture
def main():
    # One gPhoton NUV aperture-photometry run for PG 0833+211:
    # 30 s time bins, 0.00556 deg aperture, sky annulus 0.0060-0.0104 deg.
    # NOTE(review): the csvfile path contains a space before the final
    # path component -- likely unintended; verify against the output tree.
    gAperture(band="NUV", skypos=[129.082042,20.963314], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_0833+211 /sdB_PG_0833+211_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
# Run the photometry only when executed as a script.
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
ae9fd9989ac06924eaff500bcc8d0d390707e66e | cbd865bdba079069ba52e4bf78dd1395acb99d5b | /30.py | 08bb5c96aa2900c43d824aea65e74eae9541e17a | [] | no_license | anhnguyendepocen/100-pythonExercises | 52e72c214885e993207241b28124382365f28126 | 1f69184ba819b1a9d3880530aa349ae677dc1254 | refs/heads/master | 2022-01-17T14:00:16.390389 | 2019-03-23T19:22:13 | 2019-03-23T19:22:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | # Exercise No.30
# Why is there an error here?
# def foo(a=2, b):
# return a + b
# Solution
# Because default arguments must be listed at the end of the parameters list. If you list them at the beginning python
# will get confused because it is not sure if you are changing the default argument's value or passing the second
# argument.
| [
"eduardessc0@hotmail.com"
] | eduardessc0@hotmail.com |
aaeaef1286a253db035fa39dbb376b08736d9761 | aa9afb14c2e0871afc200d2a7e981334ae97974a | /item_engine/builders/package/mb1.py | aa7e39d194dd52ee9a7a12ae037540dfd42744ad | [
"MIT"
] | permissive | GabrielAmare/TextEngine | bd53357526cb0e31f5aae80dacfa02b004683017 | 39ceb323a63af35e32c4be34ae35a77e811bc973 | refs/heads/main | 2023-05-01T13:03:54.929892 | 2021-05-21T07:44:56 | 2021-05-21T07:44:56 | 344,866,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from item_engine.textbase.elements import Char, Token
from typing import Iterator
def function(src: Iterator[Char]) -> Iterator[Token]:
    """Incrementally develop Chars into Tokens, yielding each finished Token.

    NOTE(review): this looks like generated template code -- the helper
    ``_function`` it calls is not defined in this module; confirm where
    the builder injects it. Indentation of the EOF/raise branches was
    reconstructed from a whitespace-mangled copy -- verify against the
    generator's template.
    """
    # Current partial token, starting empty at position 0.
    cur: Token = Token(at=0, to=0, value=0)
    for old in src:
        # Feed chars into the current token while it lines up with them.
        while cur.to == old.at:
            new: Token = cur.develop(_function(cur, old), old)
            if not new.is_terminal:
                # Still growing: keep developing this token.
                cur = new
                continue
            if new.is_valid:
                # Finished token: emit it and start a fresh one after it.
                cur = Token(at=new.to, to=new.to, value=0)
                yield new
                continue
            if old.value == 'EOF':
                # End of input: emit the terminal EOF token and stop.
                yield Token.EOF(old.to)
                break
            # Terminal but invalid development: the input is malformed.
            raise SyntaxError((cur, old, new))
| [
"67603418+GabrielAmare@users.noreply.github.com"
] | 67603418+GabrielAmare@users.noreply.github.com |
8b262c3bfa84526014f073bef31780e8a33dcc89 | 04dbbfea381996e207f4ff65ce1fc5a297cd2b79 | /MIT_ocw/6.006F11/JUNK/timing_exc.py | 8c9685bec8e99aa00d62a059bf8762dd3773c80d | [] | no_license | Ru0ch3n/MOOCs | 30081d1905ed5916bcbad640a30cdace3a1e3ce0 | 93acda7879cb5fd96cddb5be9148bb22e025fbb7 | refs/heads/master | 2021-09-21T16:34:31.536725 | 2018-08-28T18:05:48 | 2018-08-28T18:05:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,475 | py | import math
import string
import timeit
import scipy.optimize
# Parameter generation routines
def lg(x):
    """Base-2 logarithm of x, usable inside eval'd range specs."""
    return math.log(x)/math.log(2.0)
def sqrt(x):
    """Square root; thin alias so 'sqrt' can appear in eval'd range specs."""
    return math.sqrt(x)
def make_param_list(spec_string, growth_factor):
    """
    Generate a list of parameter dictionaries from range specifications.

    Each spec has the form "LOW<=v<=HIGH" where LOW/HIGH are *strings*
    to be eval'd; later specs may refer to variables bound by earlier
    ones. Values step from LOW to HIGH by factors of growth_factor.

    Returns (var_list, D_list): the single-letter variable names, and
    one dict per combination of values (each dict also carries the
    'lg' and 'sqrt' helpers for use inside the eval'd expressions).

    Example:
        make_param_list("1<=n<=1000", 10)
        make_param_list("1<=n<=1000;1<=m<=1000;min(n,m)<=k<=max(n,m)", 10)

    Note: the deprecated ``string.split(s, sep)`` calls were replaced
    by the equivalent ``s.split(sep)`` method (identical on Python 2,
    and also valid on Python 3).
    """
    var_list = []
    spec_list = spec_string.split(";")
    D = {}
    D['lg'] = lg
    D['sqrt'] = sqrt
    D_list = [D]
    for spec in spec_list:
        spec_parts = spec.split("<=")
        assert len(spec_parts) == 3
        lower_spec, var_name, upper_spec = spec_parts
        assert len(var_name) == 1
        var_list.append(var_name)
        # Expand every existing combination with each value in range.
        new_D_list = []
        for D in D_list:
            new_D = D.copy()
            val = eval(lower_spec, D)
            while val <= eval(upper_spec, D):
                new_D[var_name] = val
                new_D_list.append(new_D.copy())
                val *= growth_factor
        D_list = new_D_list
    return (var_list, D_list)
def fit(var_list,param_list,run_times,f_list):
    """
    Least-squares fit of run times to candidate complexity functions.

    Given:
        var_list   -- variable names appearing in the specs
        param_list -- one dict of parameter values per sample
        run_times  -- measured time (microseconds) per sample
        f_list     -- candidate functions as *strings*, e.g.
                      "n", "n**2", "min(n,m)"
    Prints the fitted coefficient for each function in f_list plus
    residual statistics. (Python 2 module: uses print statements.)
    """
    print "var_list",var_list
    print "Function list:",f_list
    print "run times:",
    for i in range(len(param_list)):
        print
        for v in var_list:
            print v,"= %6s"%param_list[i][v],
        print ": %8f"%run_times[i],"microseconds",
        print
    # Build the design matrix: A[i][j] = f_list[j] evaluated on sample i.
    rows = len(run_times)
    cols = len(f_list)
    A = [ [0 for j in range(cols)] for i in range(rows) ]
    for i in range(rows):
        D = param_list[i]
        for j in range(cols):
            A[i][j] = float(eval(f_list[j],D))
    b = run_times
    (x,resids,rank,s) = fit2(A,b)
    print "Coefficients as interpolated from data:"
    for j in range(cols):
        # Prefix "+" between terms, a space before the first positive one.
        sign = ''
        if x[j]>0 and j>0:
            sign="+"
        elif x[j]>0:
            sign = " "
        print "%s%g*%s"%(sign,x[j],f_list[j])
    print "(measuring time in microseconds)"
    print "Sum of squares of residuals:",resids
    print "RMS error = %0.2g percent"%(math.sqrt(resids/len(A))*100.0)
def fit2(A, b):
    """Least-squares fit minimizing *relative* (percentage) error.

    A -- design matrix (rows = samples, columns = candidate functions)
    b -- observed values; must be nonzero (used as denominators)

    Returns (x, resids, 0, 0), shaped like numpy.linalg.lstsq's result:
    x is the coefficient vector and resids the sum of squared relative
    errors. Changes vs. the previous version: the inner accumulator no
    longer shadows the builtin ``sum`` and a dead commented-out debug
    print was removed.
    """
    def f(x):
        # Residual vector: (A@x - b) / b, one entry per sample.
        assert len(x) == len(A[0])
        resids = []
        for i in range(len(A)):
            pred = 0.0
            for j in range(len(A[0])):
                pred += A[i][j] * x[j]
            resids.append((pred - b[i]) / b[i])
        return resids
    ans = scipy.optimize.leastsq(f, [0.0] * len(A[0]))
    # leastsq's result needs repackaging in the single-parameter case so
    # callers can always index x[j].
    if len(A[0]) == 1:
        x = [ans[0]]
    else:
        x = ans[0]
    resids = sum([r * r for r in f(x)])
    return (x, resids, 0, 0)
# def int2str(num):
# result = ''
# while num > 0:
# result += str(num %10);
# num /= 10
# return result[::-1]
int2str = """\
def int2str(num):
result = ''
while num > 0:
result += str(num %10);
num /= 10
return result[::-1]
"""
def test_number():
    """Time three int<->str conversions over n in [1000, 10000] and fit
    the measurements against candidate complexity functions.
    (Python 2 module: uses print statements and string.atoi.)"""
    print
    print "Test Number-1 -- time to compute int('1'*n)"
    spec_string = "1000<=n<=10000"
    growth_factor = 2
    print "Spec_string: ",spec_string,"by factors of",growth_factor
    var_list, param_list = make_param_list(spec_string,growth_factor)
    f_list = ("n**2","n","1")
    run_times = []
    trials = 1000
    for D in param_list:
        # Setup string builds the n-digit input; %(n)s pulls n from D.
        t = timeit.Timer("string.atoi(x)","import string;x='1'*%(n)s"%D)
        run_times.append(t.timeit(trials)*1e6/float(trials))
    fit(var_list,param_list,run_times,f_list)
    # Refit with the linear model only, for comparison.
    f_list = ("n","1")
    fit(var_list,param_list,run_times,f_list)
    print
    print "Test Number-2 -- time to compute repr(2**n)"
    spec_string = "1000<=n<=10000"
    growth_factor = 2
    print "Spec_string: ",spec_string,"by factors of",growth_factor
    var_list, param_list = make_param_list(spec_string,growth_factor)
    f_list = ("n**2","n","1")
    run_times = []
    trials = 1000
    for D in param_list:
        t = timeit.Timer("repr(x)","x=2**%(n)s"%D)
        run_times.append(t.timeit(trials)*1e6/float(trials))
    fit(var_list,param_list,run_times,f_list)
    f_list = ("n","1")
    fit(var_list,param_list,run_times,f_list)
    print
    print "Test Number-3 -- time to compute int2str(2**n)"
    spec_string = "1000<=n<=10000"
    growth_factor = 2
    print "Spec_string: ",spec_string,"by factors of",growth_factor
    var_list, param_list = make_param_list(spec_string,growth_factor)
    f_list = ("n**2","n","1")
    run_times = []
    trials = 1000
    for D in param_list:
        # int2str is source text prepended to the setup code (see above).
        t = timeit.Timer("int2str(x)", int2str+"x=2**%(n)s"%D)
        run_times.append(t.timeit(trials)*1e6/float(trials))
    fit(var_list,param_list,run_times,f_list)
    f_list = ("n","1")
    fit(var_list,param_list,run_times,f_list)
# Run the timing experiments when invoked as a script.
if __name__ == '__main__':
    test_number()
| [
"cc.rafaz@zafar.cc"
] | cc.rafaz@zafar.cc |
dc2171d0ee0d6fe1239997e6fb0b4e5ca1c6b440 | 38558ac2e78837e7f975364f03a1f55fb02103af | /PRA practice/PRA S.py | b414adcbe405d0e17e933b2fa4b89f09bb0e79b5 | [] | no_license | SOURADEEP-DONNY/WORKING-WITH-PYTHON | a0bc2ff5ddab1b25563927c8f361c6512683d6ff | 5198d14f0711a3ba7f2fe8bac61d6404c20ea40c | refs/heads/master | 2023-07-14T04:49:08.399519 | 2021-08-29T15:22:33 | 2021-08-29T15:22:33 | 270,723,307 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | class Employee:
def __init__(self,emp_id,emp_name,role):
self.employeeId=emp_id
self.employeeName=emp_name
self.geInRole=role
self.status="In Service"
class Organization:
    """Holds the employee roster and the retirement bookkeeping on it."""

    def __init__(self, emp_list):
        self.employeeList = emp_list

    def updateEmployeeStatus(self, noOfYears):
        """Flag employees whose years-in-role exceed *noOfYears*.

        Mutates each matching employee's ``status`` to "Retirement Due"
        and returns the (same) roster list.
        """
        for emp in self.employeeList:
            if emp.geInRole > noOfYears:
                emp.status = "Retirement Due"
        return self.employeeList

    def countEmployees(self):
        """Return how many employees are currently flagged 'Retirement Due'."""
        return sum(1 for emp in self.employeeList
                   if emp.status == "Retirement Due")
# Console driver: read N employees, apply the retirement threshold, report.
if __name__=="__main__":
    num=int (input())  # number of employee records to read
    emp_list=[]
    for _ in range(num):
        id=int(input())
        name=input()
        role=int(input())  # years in role
        emp_list.append(Employee(id,name,role))
    obj=Organization(emp_list)
    noOfYears=int(input())  # retirement threshold in years
    result1=obj.updateEmployeeStatus(noOfYears)
    result2=obj.countEmployees()
    if(result2>0):
        print("Count of employee updated=",result2)
    else:
        print("No employee updated")
    # Print the full roster with its (possibly updated) statuses.
    for i in result1:
        print(i.employeeId,i.employeeName,i.status)
| [
"noreply@github.com"
] | SOURADEEP-DONNY.noreply@github.com |
f72b33efb196f6401894f4994d0802946bc39515 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/gkehub/v1alpha/gkehub-v1alpha-py/google/cloud/gkehub/servicemesh_v1alpha/types/servicemesh.py | c524b4cb240d7c3bf95c02aa269442cef01aa673 | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,838 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import struct_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.gkehub.servicemesh.v1alpha',
manifest={
'FeatureState',
'MembershipState',
'AnalysisMessageBase',
'AnalysisMessage',
},
)
class FeatureState(proto.Message):
r"""**Service Mesh**: State for the whole Hub, as analyzed by the
Service Mesh Hub Controller.
Attributes:
analysis_messages (Sequence[google.cloud.gkehub.servicemesh_v1alpha.types.AnalysisMessage]):
Output only. Results of running Service Mesh
analyzers.
"""
analysis_messages = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='AnalysisMessage',
)
class MembershipState(proto.Message):
r"""**Service Mesh**: State for a single Membership, as analyzed by the
Service Mesh Hub Controller.
Attributes:
analysis_messages (Sequence[google.cloud.gkehub.servicemesh_v1alpha.types.AnalysisMessage]):
Output only. Results of running Service Mesh
analyzers.
"""
analysis_messages = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='AnalysisMessage',
)
class AnalysisMessageBase(proto.Message):
r"""AnalysisMessageBase describes some common information that is
needed for all messages.
Attributes:
type_ (google.cloud.gkehub.servicemesh_v1alpha.types.AnalysisMessageBase.Type):
Represents the specific type of a message.
level (google.cloud.gkehub.servicemesh_v1alpha.types.AnalysisMessageBase.Level):
Represents how severe a message is.
documentation_url (str):
A url pointing to the Service Mesh or Istio
documentation for this specific error type.
"""
class Level(proto.Enum):
r"""The values here are chosen so that more severe messages get
sorted higher, as well as leaving space in between to add more
later See istio.analysis.v1alpha1.AnalysisMessageBase.Level
"""
LEVEL_UNSPECIFIED = 0
ERROR = 3
WARNING = 8
INFO = 12
class Type(proto.Message):
r"""A unique identifier for the type of message. Display_name is
intended to be human-readable, code is intended to be machine
readable. There should be a one-to-one mapping between display_name
and code. (i.e. do not re-use display_names or codes between message
types.) See istio.analysis.v1alpha1.AnalysisMessageBase.Type
Attributes:
display_name (str):
A human-readable name for the message type. e.g.
"InternalError", "PodMissingProxy". This should be the same
for all messages of the same type. (This corresponds to the
``name`` field in open-source Istio.)
code (str):
A 7 character code matching ``^IST[0-9]{4}$`` or
``^ASM[0-9]{4}$``, intended to uniquely identify the message
type. (e.g. "IST0001" is mapped to the "InternalError"
message type.)
"""
display_name = proto.Field(
proto.STRING,
number=1,
)
code = proto.Field(
proto.STRING,
number=2,
)
type_ = proto.Field(
proto.MESSAGE,
number=1,
message=Type,
)
level = proto.Field(
proto.ENUM,
number=2,
enum=Level,
)
documentation_url = proto.Field(
proto.STRING,
number=3,
)
class AnalysisMessage(proto.Message):
r"""AnalysisMessage is a single message produced by an analyzer,
and it used to communicate to the end user about the state of
their Service Mesh configuration.
Attributes:
message_base (google.cloud.gkehub.servicemesh_v1alpha.types.AnalysisMessageBase):
Details common to all types of Istio and
ServiceMesh analysis messages.
description (str):
A human readable description of what the
error means. It is suitable for non-
internationalize display purposes.
resource_paths (Sequence[str]):
A list of strings specifying the resource identifiers that
were the cause of message generation. A "path" here may be:
- MEMBERSHIP_ID if the cause is a specific member cluster
- MEMBERSHIP_ID/(NAMESPACE/)?RESOURCETYPE/NAME if the cause
is a resource in a cluster
args (google.protobuf.struct_pb2.Struct):
A UI can combine these args with a template (based on
message_base.type) to produce an internationalized message.
"""
message_base = proto.Field(
proto.MESSAGE,
number=1,
message='AnalysisMessageBase',
)
description = proto.Field(
proto.STRING,
number=2,
)
resource_paths = proto.RepeatedField(
proto.STRING,
number=3,
)
args = proto.Field(
proto.MESSAGE,
number=4,
message=struct_pb2.Struct,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
23d273d98f2e161c75df76b438884487fc3cedd3 | c459f4dd7b198ec8d8db8379726a5b2650be6636 | /backoffice/migrations/0027_auto_20210224_1621.py | 7500f1e83101678b90c23d489885f249201fc720 | [] | no_license | jittat/admapp | 4c712182cd06e82efab6c2513fb865e5d00feae8 | 38bf299015ae423b4551f6b1206742ee176b8b77 | refs/heads/master | 2023-06-10T03:23:41.174264 | 2023-06-09T19:41:03 | 2023-06-09T19:41:03 | 101,953,724 | 10 | 4 | null | 2023-04-21T22:48:55 | 2017-08-31T03:12:04 | Python | UTF-8 | Python | false | false | 412 | py | # Generated by Django 2.2.17 on 2021-02-24 16:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backoffice', '0026_auto_20210224_1550'),
]
operations = [
migrations.AlterField(
model_name='adjustmentmajorslot',
name='major_full_code',
field=models.CharField(max_length=20),
),
]
| [
"jittat@gmail.com"
] | jittat@gmail.com |
b5aaaf013e3bf2e723a3e6318ca85c11b84631ca | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/428/usersdata/284/106789/submittedfiles/jogoDaVelha.py | 3a3449d0234c2f307a0dbc52ba6f670be5669645 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,279 | py | # -*- coding: utf-8 -*-
from jogoDaVelha_BIB import *
# COLOQUE SEU PROGRAMA A PARTIR DAQUI
#definir entrada em lista:
import random
def solicitaSimboloDoHumano():
    """Prompt until the player picks 'X' or 'O' (case-insensitive).

    Returns a two-item list ``[player_symbol, computer_symbol]``.
    """
    escolha = None
    while escolha not in ('X', 'O'):
        print('Qual símbolo você deseja utilizar no jogo? (X ou O) ')
        escolha = input().upper()
    return ['X', 'O'] if escolha == 'X' else ['O', 'X']
def sorteioPrimeiraJogada():
    """Coin-toss who plays first: 'Computador' or 'Jogador' (50/50)."""
    # random.randint(1, 2) keeps the original RNG consumption pattern.
    return 'Computador' if random.randint(1, 2) == 1 else 'Jogador'
#movimento em forma de vetor/matriz:
#movimento em forma de vetor/matriz:
def jogadaHumana(tabuleiro):
    """Ask the human (global ``nome``) for a move and return it as an int.

    Keeps re-prompting while the reply is not one of '1'..'9' or the chosen
    square is already taken (checked via ``vazio``).
    """
    movimento = 0
    # BUGFIX: the original had ``if movimento == 0 0:`` here, which is a
    # SyntaxError and prevented the module from even being imported. The
    # while-condition already rejects any input outside '1'..'9', so the
    # broken branch is simply removed.
    while movimento not in '1 2 3 4 5 6 7 8 9'.split() or not vazio(tabuleiro, int(movimento)):
        print('Qual a sua jogada, {}?'.format(nome))
        movimento = input()
    return int(movimento)
#função com relação a matrizes:
def jogadaComputador(tabuleiro, letraComputador):
    """Choose the computer's move (square index 1-9) for the given board.

    Strategy, in priority order:
      1. play an immediately winning move;
      2. block the human's immediately winning move;
      3. take a random free corner;
      4. take the center;
      5. take a random free edge.
    """
    # Derive the opponent's symbol from our own.
    if letraComputador == 'X':
        letraJogador = 'O'
    else:
        letraJogador = 'X'
    # 1) Win now if possible: simulate each free square on a board copy.
    for i in range(1,10):
        copy = mostraTabuleiro(tabuleiro)
        if vazio(copy, i):
            movimentacao(copy, letraComputador, i)
            if verificaVencedor(copy, letraComputador):
                return i
    # 2) Otherwise occupy the square where the human would win next turn.
    for i in range(1, 10):
        copy = mostraTabuleiro(tabuleiro)
        if vazio(copy, i):
            movimentacao(copy, letraJogador, i)
            if verificaVencedor(copy, letraJogador):
                return i
    # 3) Corners first, 4) then center, 5) then edges.
    movimento = movAleatoria(tabuleiro, [1, 3, 7, 9])
    if movimento != None:
        return movimento
    if vazio(tabuleiro, 5):
        return 5
    return movAleatoria(tabuleiro, [2, 4, 6, 8])
#def validaJogada()
def mostraTabuleiro(tabuleiro):
    """Return a shallow copy of the board (name kept for compatibility)."""
    return list(tabuleiro)
def verificaVencedor(tabuleiro, letra):
    """Return True when *letra* occupies any of the eight winning lines.

    Squares follow numpad numbering: 7-8-9 on top, 1-2-3 at the bottom.
    """
    linhas = (
        (7, 8, 9), (4, 5, 6), (1, 2, 3),   # rows
        (7, 4, 1), (8, 5, 2), (9, 6, 3),   # columns
        (7, 5, 3), (9, 5, 1),              # diagonals
    )
    return any(all(tabuleiro[casa] == letra for casa in linha)
               for linha in linhas)
#################################################################################
def vazio(tabuleiro, movimento):
    """Return True when square *movimento* is still unclaimed (holds ' ')."""
    return ' ' == tabuleiro[movimento]
def desenhaTabuleiro(tabuleiro):
    """Print the board as three rows; squares follow numpad numbering."""
    for a, b, c in ((7, 8, 9), (4, 5, 6), (1, 2, 3)):
        print(' {} | {} | {}'.format(tabuleiro[a], tabuleiro[b], tabuleiro[c]))
def jogarNovamente():
    """Ask whether to play again; True when the reply starts with 'sim'."""
    print('Você deseja jogar novamente?(sim ou não)')
    resposta = input()
    return resposta.lower().startswith('sim')
def movimentacao(tabuleiro, letra, movimento):
    """Write *letra* into square *movimento*, mutating the board in place."""
    tabuleiro[movimento] = letra
def movAleatoria(tabuleiro, movimentosList):
    """Pick a random empty square among *movimentosList*, or None if all taken."""
    livres = [casa for casa in movimentosList if tabuleiro[casa] == ' ']
    return random.choice(livres) if livres else None
def completo(tabuleiro):
    """Return True when no square 1-9 is empty (index 0 is unused)."""
    return all(tabuleiro[casa] != ' ' for casa in range(1, 10))
# ---- Interactive driver: plays full games until the player declines. ----
print('Bem vindo ao JogoDaVelha do grupo X')
nome = input('Qual o seu nome (ou apelido)? ')
while True:
    # Index 0 of the board is unused; squares are addressed 1-9 (numpad layout).
    tabul = [' '] * 10
    letraJogador, letraComputador = solicitaSimboloDoHumano()
    turn = sorteioPrimeiraJogada()
    print('Vencedor do sorteio para início do jogo: {}'.format(turn))
    rodando = True
    while rodando:
        if turn == 'Jogador':
            desenhaTabuleiro(tabul)
            movimento = jogadaHumana(tabul)
            movimentacao(tabul, letraJogador, movimento)
            if verificaVencedor(tabul, letraJogador):
                desenhaTabuleiro(tabul)
                print('Vencedor: {}'.format(nome))
                rodando = False
            else:
                # No winner: either a draw ends the round or play passes over.
                if completo(tabul):
                    desenhaTabuleiro(tabul)
                    print('Deu Velha!')
                    break
                else:
                    turn = 'Computador'
        else:
            # Computer's turn: pick a move, apply it, then check end conditions.
            movimento = jogadaComputador(tabul, letraComputador)
            movimentacao(tabul, letraComputador, movimento)
            if verificaVencedor(tabul, letraComputador):
                desenhaTabuleiro(tabul)
                print('Vencedor: Computador')
                rodando = False
            else:
                if completo(tabul):
                    desenhaTabuleiro(tabul)
                    print('Deu Velha!')
                    break
                else:
                    turn = 'Jogador'
    # One round finished (win or draw): offer another game.
    if not jogarNovamente():
        break
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f3f8bb5d1c5a47a51aafed60b97ba43063db2582 | 6380a784ff2bbae2ffa67a50757bf0bb3bd7e87c | /config/api_router.py | a12bee390b55446679072d6b59145e56981ec515 | [] | no_license | mahidul-islam/rijal | fe07a1024ba2b430569b4f91abcd275958297013 | d31e8548ff69438b7e1b0f49468f367be15533a9 | refs/heads/master | 2022-11-21T14:23:25.446961 | 2020-07-21T07:10:17 | 2020-07-21T07:10:17 | 281,323,224 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter
from arrijal.users.api.views import UserViewSet
# DefaultRouter adds the browsable API root view in DEBUG; SimpleRouter
# keeps production lean.
router = DefaultRouter() if settings.DEBUG else SimpleRouter()

router.register("users", UserViewSet)

app_name = "api"
urlpatterns = router.urls
| [
"mizihan84@gmail.com"
] | mizihan84@gmail.com |
745a7bddd4f8ff7dd570dc5adce795e85124f73a | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /tf3d/ops/tensorflow_sparse_conv_ops/sparse_conv_ops_test.py | 5f0edf1bbfee5b82233daf983f0fb915f45ef112 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 1,803 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test sparse_conv ops."""
import numpy as np
import tensorflow as tf
try:
import tensorflow_sparse_conv_ops as sparse_conv_ops # pylint: disable=g-import-not-at-top
except ImportError:
import sparse_conv_ops # pylint: disable=g-import-not-at-top
class SparseConvOpTest(tf.test.TestCase):
  """Test sparse_conv ops."""

  def test_spar_conv_op(self):
    """Forward + gradient smoke test for submanifold_sparse_conv3d."""
    # One batch of 4 voxels with 3 feature channels each: shape [1, 4, 3].
    voxel_features = tf.constant(
        [[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 2.0, 8.0]]],
        dtype=tf.float32)
    # Integer xyz grid coordinates for each voxel: shape [1, 4, 3].
    voxel_xyz_indices = tf.constant(
        [[[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]], dtype=tf.int32)
    # All 4 voxels in the (single) batch entry are valid.
    num_valid_voxels = tf.constant([4], dtype=tf.int32)
    # Filter bank: 3x3x3 kernel, 3 input channels -> 5 output channels.
    init_value = np.ones([3, 3, 3, 3, 5], np.float32)
    filters = tf.Variable(initial_value=init_value, trainable=True)
    with tf.GradientTape() as g:
      voxel_outputs = sparse_conv_ops.submanifold_sparse_conv3d(
          voxel_xyz_indices, num_valid_voxels, voxel_features, filters)
    print('voxel_outputs:', voxel_outputs)
    # Output keeps the voxel layout but with 5 channels; the gradient
    # w.r.t. the filters must match the filter shape.
    self.assertAllEqual(voxel_outputs.shape, [1, 4, 5])
    self.assertAllEqual(
        g.gradient(voxel_outputs, filters).shape, [3, 3, 3, 3, 5])
if __name__ == '__main__':
tf.test.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
d5831481809bc39418d1870a746d55b3c2d993ab | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /netex/models/third_party_product.py | 50ae347ef65cbaf67a7ff980b403457e18ba01ef | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 309 | py | from dataclasses import dataclass
from .third_party_product_version_structure import ThirdPartyProductVersionStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class ThirdPartyProduct(ThirdPartyProductVersionStructure):
    """Concrete NeTEx ``ThirdPartyProduct`` element (xsdata-generated binding).

    Inherits all fields from ThirdPartyProductVersionStructure; only binds
    the element to the NeTEx XML namespace.
    """
    class Meta:
        namespace = "http://www.netex.org.uk/netex"
| [
"chris@komposta.net"
] | chris@komposta.net |
41550f34d2705294a0046ac8b1de8397166aec24 | 73332abdcadb62f4f262d0c30856c3c257a9ee7d | /oyProjectManager/models/repository.py | e652584bc54fa7228d4ef379aa506eb227c11021 | [
"BSD-2-Clause"
] | permissive | code-google-com/oyprojectmanager | 454435604cc150c1b54ec2c54294e0fa05490f82 | 3085ecbe1cc04a73ec69b4848b789009546feae7 | refs/heads/master | 2021-01-19T02:40:56.342086 | 2015-01-26T16:40:00 | 2015-01-26T16:40:00 | 32,266,400 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,653 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Erkan Ozgur Yilmaz
#
# This module is part of oyProjectManager and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
from exceptions import AttributeError, RuntimeError, ValueError, IOError
import os
from oyProjectManager import utils
from oyProjectManager.utils import cache
# create a logger
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
# TODO: Remove Repository Class, it is useless
class Repository(object):
    """Repository class gives information about the repository and projects in
    that repository.
    The Repository class helps:
    * Get a list of project names in the current repository
    * Find server paths
    * and some auxiliary things like:
      * convert the given path to repository relative path which contains
        the environment variable key in the repository path.
    In the current design of the system there can only be one repository where
    all the projects are saved on. It is a little bit hard or adventurous to
    build a system which supports multiple repositories.
    .. note::
      In future may there be support for multiple repositories by using
      repository specific environment variables, like $REPO1 for repository in
      the first index of the config.repository settings and $REPO2 for the
      second and etc. But in the current design it was a little bit an overkill
      to add this support.
    .. warning::
      The repository setting (``repository``) in the users own config.py file
      is useless for getting the repository path. It is the $REPO environment
      variable that oyProjectManager uses. The ``repository`` setting in the
      ``config.py`` is there to be able replace the path values for one
      operating system in another, for example, think that a path for a texture
      file is set to "/mnt/Projects/TestProject/Texture1". This
      is obviously a path for OSX or linux, but what happens when you are under
      Windows and open the file, in this case oyProjectManager will try to
      replace the path with the environment variable by checking if the path
      matches any of the oses repository path settings and it will reproduce
      the path as "$REPO/TestProject" in case the repository settings is
      "/mnt/Projects" for OSX.
    There are no parameters that needs to be set to initialize a Repository
    instance.
    """
    def __init__(self):
        logger.debug("initializing repository instance")
        # get the config
        # Import deferred to construction time so the user's config.py has
        # been evaluated before it is read here.
        from oyProjectManager import conf
        self.conf = conf
        self._server_path = ""
        self._windows_path = ""
        self._osx_path = ""
        self._linux_path = ""
        self._project_names = []
        # Fails fast (RuntimeError/ValueError) when $REPO is missing/empty.
        self._validate_repository_env_key()
        # -----------------------------------------------------
        # read the repository settings and assign the defaults
        # Each per-OS path is optional: a missing conf.repository attribute
        # raises AttributeError and the default empty string is kept.
        try:
            self._windows_path = \
                self.conf.repository["windows_path"].replace("\\", "/")
        except AttributeError:
            pass
        try:
            self._linux_path = \
                self.conf.repository["linux_path"].replace("\\", "/")
        except AttributeError:
            pass
        try:
            self._osx_path = \
                self.conf.repository["osx_path"].replace("\\", "/")
        except AttributeError:
            pass
        logger.debug("finished initializing repository instance")
    def _validate_repository_env_key(self):
        """validates the repository env key environment variable

        :raises RuntimeError: when the env variable is not set at all
        :raises ValueError: when it is set but empty
        """
        # raise a RuntimeError if no REPO environment var is set
        # NOTE: dict.has_key is Python-2-only (removed in Python 3); this
        # module targets Python 2 (see the ``exceptions`` import above).
        if not os.environ.has_key(self.conf.repository_env_key):
            raise RuntimeError("Please set an environment variable with the "
                               "name %s and set it to your repository path" %
                               self.conf.repository_env_key)
        if os.environ[self.conf.repository_env_key] == "":
            raise ValueError("The %s environment variable can not be an "
                             "empty string" % self.conf.repository_env_key)
    # @property
    # @bCache.cache()
    # NOTE(review): CachedMethod here wraps the *property object* produced by
    # the decorator below it, not the plain getter — confirm that
    # utils.cache.CachedMethod supports this stacking order.
    @cache.CachedMethod
    @property
    def project_names(self):
        """returns a list of project names
        """
        self.update_project_list()
        return self._project_names
    def update_project_list(self):
        """updates the project list variable

        A child folder of the server path counts as a project only when it
        contains the configured database file (conf.database_file_name).
        """
        logger.debug("updating projects list")
        try:
            self._project_names = []
            # presumably utils.getChildFolders raises IOError for a missing
            # server path — that is what the except below handles; verify.
            child_folders = utils.getChildFolders(self.server_path)
            for folder in child_folders:
                # check if the .metadata.db file exists under the folder
                if os.path.exists(
                    os.path.join(
                        self.server_path,
                        folder,
                        self.conf.database_file_name
                    )
                ):
                    # it should be a valid project
                    self._project_names.append(folder)
            self._project_names.sort()
        except IOError:
            logger.warning("server path doesn't exists, %s" % self.server_path)
    @property
    def server_path(self):
        """The server path

        Resolved from the $REPO environment variable; expandvars is applied
        twice so a value that itself expands to another variable reference
        gets resolved one extra level.
        """
        return os.path.expandvars(
            os.path.expandvars(
                os.path.expanduser(
                    os.environ[self.conf.repository_env_key]
                )
            )
        )
    @property
    def linux_path(self):
        """The linux path of the jobs server (backslashes normalized)."""
        return self._linux_path.replace("\\", "/")
    @property
    def windows_path(self):
        """The windows path of the jobs server
        """
        return self._windows_path.replace("\\", "/")
    @property
    def osx_path(self):
        """The osx path of the jobs server
        """
        return self._osx_path.replace("\\", "/")
    def get_project_name(self, file_path):
        """Returns the project name from the given path or full path.
        Calculates the project name from the given file or folder full path.
        It returns None if it can not get a suitable name.
        :param str file_path: The file or folder path.
        :returns: Returns a string containing the name of the project
        :rtype: str
        """
        #assert(isinstance(file_path, (str, unicode)))
        if file_path is None:
            return None
        # Normalize to forward slashes so the prefix test below is
        # OS-independent.
        file_path = os.path.expandvars(
            os.path.expanduser(
                os.path.normpath(file_path)
            )
        ).replace("\\", "/")
        if not file_path.startswith(self.server_path.replace("\\", "/")):
            return None
        # Strip the server path plus its trailing separator; the first
        # remaining component is the project folder name.
        residual = file_path[len(self.server_path.replace("\\", "/"))+1:]
        parts = residual.split("/")
        if len(parts) > 1:
            return parts[0]
        return None
    def relative_path(self, path):
        """Converts the given path to repository relative path.
        If "M:/JOBs/EXPER/_PROJECT_SETUP_" is given it will return
        "$REPO/EXPER/_PROJECT_SETUP_"
        """
        return path.replace(self.server_path,
                            "$" + self.conf.repository_env_key)
| [
"eoyilmaz@gmail.com"
] | eoyilmaz@gmail.com |
bef343ea16d689688535c9e4696e408161b5b29d | 8c16c8fa0a32e1b190df3206c739cc844f81df7a | /home/migrations/0002_load_initial_data.py | 808c1c33641ba4e51e3c8fae89ca49b39bf69c32 | [] | no_license | crowdbotics-apps/r123-dev-1618 | c689f7f0e5fb294632fa6abe40b13a2e4b5ed3bc | 192d89a3e82f212700fce8b9160947a73abdc716 | refs/heads/master | 2022-04-02T11:13:12.271354 | 2020-02-06T12:48:14 | 2020-02-06T12:48:14 | 238,687,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the CustomText table with the app title (data migration step)."""
    # Use the historical model via apps.get_model, per Django data-migration
    # convention, instead of importing the current model class.
    CustomText = apps.get_model("home", "CustomText")
    customtext_title = "R123"
    CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
    """Seed the HomePage table with the default welcome markup."""
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
    <h1 class="display-4 text-center">R123</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites Site (pk=1) at this app's domain."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "r123-dev-1618.botics.co"
    site_params = {
        "name": "R123",
    }
    # custom_domain is a hard-coded non-empty literal here, so this branch
    # always runs; the guard is a template artifact for deployments that
    # have no custom domain configured.
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    """Data migration: seed CustomText, HomePage and the default Site row."""

    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    # NOTE: no reverse functions are supplied, so this migration is not
    # reversible with ``migrate`` backwards.
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
096f62f342b714d1b73d1cde4d377414250fa6f4 | 02d8a026d63127f045042e03e23acbe6c9675db8 | /vb2py/PythonCard/tools/resourceEditor/modules/backgroundInfoDialog.rsrc.py | 613f3425866455d083794a5997c9411d81d9d930 | [
"BSD-3-Clause"
] | permissive | VB6Hobbyst7/xl_vb2py | 40e77976b452732575e2726fb1f0675b1ab9f86f | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | refs/heads/main | 2023-07-28T20:12:11.933183 | 2021-09-23T18:12:02 | 2021-09-23T18:12:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,268 | py | {'type':'CustomDialog',
'name':'backgroundInfo',
'title':'Background Info',
'position':(53, 94),
'size':(370, 563),
'components': [
{'type':'Button',
'name':'btnCustomize',
'position':(247, 333),
'label':'Customize',
},
{'type':'RadioGroup',
'name':'windowStyle',
'position':(76, 279),
'size':(281, -1),
'items':['Static', 'Resizeable', 'Custom'],
'label':'Window Style',
'layout':'horizontal',
'max':1,
'stringSelection':'Static',
},
{'type':'StaticText',
'name':'stcName',
'position':(10, 10),
'text':'Name:',
},
{'type':'StaticText',
'name':'stcTitle',
'position':(10, 35),
'text':'Title:',
},
{'type':'StaticText',
'name':'stcPosition',
'position':(10, 60),
'text':'Position:',
},
{'type':'StaticText',
'name':'stcSize',
'position':(10, 85),
'text':'Size:',
},
{'type':'StaticText',
'name':'stcForegroundColor',
'position':(10, 110),
'text':'Foreground color:',
},
{'type':'StaticText',
'name':'stcBackgroundColor',
'position':(10, 135),
'text':'Background color:',
},
{'type':'StaticText',
'name':'stcImage',
'position':(10, 160),
'text':'Image:',
},
{'type':'StaticText',
'name':'stcIcon',
'position':(10, 210),
'text':'Icon:',
},
{'type':'TextField',
'name':'fldName',
'position':(130, 5),
},
{'type':'TextField',
'name':'fldTitle',
'position':(130, 30),
'size':(188, -1),
},
{'type':'TextField',
'name':'fldPosition',
'position':(130, 55),
'size':(80, -1),
},
{'type':'TextField',
'name':'fldSize',
'position':(130, 80),
'size':(80, -1),
},
{'type':'TextField',
'name':'fldForegroundColor',
'position':(130, 110),
},
{'type':'Button',
'name':'btnForegroundColor',
'position':(250, 110),
'label':'Color...',
},
{'type':'TextField',
'name':'fldBackgroundColor',
'position':(130, 135),
},
{'type':'Button',
'name':'btnBackgroundColor',
'position':(250, 135),
'label':'Color...',
},
{'type':'TextField',
'name':'fldImage',
'position':(130, 160),
},
{'type':'Button',
'name':'btnFile',
'position':(250, 160),
'label':'File...',
},
{'type':'CheckBox',
'name':'chkTiled',
'position':(130, 185),
'size':(135, -1),
'label':'Tile image',
},
{'type':'TextField',
'name':'fldIcon',
'position':(130, 210),
},
{'type':'Button',
'name':'btnIconFile',
'position':(250, 210),
'label':'File...',
},
{'type':'CheckBox',
'name':'chkStatusBar',
'position':(130, 235),
'label':'Status bar on window',
},
{'type':'CheckBox',
'name':'chkVisible',
'position':(130, 260),
'size':(135, -1),
'checked':True,
'label':'Visible at startup',
},
{'type':'Button',
'id':5100,
'name':'btnOK',
'position':(9, 405),
'default':1,
'label':'OK',
},
{'type':'Button',
'id':5101,
'name':'btnCancel',
'position':(114, 405),
'label':'Cancel',
},
] # end components
} # end CustomDialog
| [
"c.git@pronovost.net"
] | c.git@pronovost.net |
0e4e8d3779836386612dc162af745efcb539ecb8 | 6ac2631c256f156d4ddf169e6c67f1fe66ebcaaf | /062/pyteacher/app_accounts/migrations/0011_auto_20190321_0515.py | e36b9ecbc568798fd0c86cd592db78267480287a | [] | no_license | kasaiee/how-to-pyteacher | 101f106aeeed1b34756cecf502337ff8ee584ff5 | 074a57533f53fd1b8c7f37cd11dbc3b32ab8a08f | refs/heads/master | 2022-12-10T23:50:46.851784 | 2019-07-15T19:31:03 | 2019-07-15T19:31:03 | 187,372,111 | 6 | 4 | null | 2022-12-08T01:55:05 | 2019-05-18T15:08:03 | null | UTF-8 | Python | false | false | 502 | py | # Generated by Django 2.1.5 on 2019-03-21 05:15
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app_base', '0011_auto_20190302_2010'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app_accounts', '0010_auto_20190321_0437'),
]
operations = [
migrations.RenameModel(
old_name='RegisteredCourse',
new_name='RegisteredItem',
),
]
| [
"1tapciran@gmail.com"
] | 1tapciran@gmail.com |
048517a1253073256e7a998e84e5de7e1dcffbcd | 7560e624ac39fcdf44b7b8d747c072c923bb6d1b | /docs/conf.py | 2bc866940e3023d41d63604c316372b4429720d7 | [] | no_license | tardis-sn-archive/tardisatomic | 889473a8c9b466dd433bc89778b16e43857d1652 | bea354a6427d3d9539abbac6a1ce476bdb3c9aaf | refs/heads/master | 2021-05-28T21:54:55.953270 | 2015-05-20T14:22:43 | 2015-05-20T14:22:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,948 | py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import sys, os
sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
# Load all of the global Astropy configuration
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.sourceforge.net/', None),
'h5py': ('http://docs.h5py.org/en/latest/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/dev/', None),
'astropy': ('http://docs.astropy.org/en/stable/', None)
}
import sphinx_bootstrap_theme
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.graphviz',
'numpydoc',
'astropy_helpers.sphinx.ext.automodapi'
]
source_suffix = '.rst'
## get's rid of many toctree contains errors: see https://github.com/phn/pytpm/issues/3#issuecomment-12133978
numpydoc_show_class_members = False
extensions += ['matplotlib.sphinxext.plot_directive',
'sphinxcontrib.bibtex']
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.1'
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates']
#exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog = """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = u'TARDIS Atomic'
author = u'TARDIS team'
copyright = u'2013, ' + author
master_doc = 'index'
#default_role = 'obj'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import tardisatomic
# The short X.Y version.
version = tardisatomic.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = tardisatomic.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'tardis_logo.ico'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
| [
"wkerzendorf@gmail.com"
] | wkerzendorf@gmail.com |
168901e7e84d4b1ade6c2cd222d40e16ee48d113 | 7ec38beb6f041319916390ee92876678412b30f7 | /src/leecode/medium_0885.py | 54fe1f13b536aa90188d941dedc3bf3b9271e365 | [] | no_license | hopensic/LearnPython | 3570e212a1931d4dad65b64ecdd24414daf51c73 | f735b5d865789843f06a623a4006f8883d6d1ae0 | refs/heads/master | 2022-02-18T23:11:30.663902 | 2022-02-12T17:51:56 | 2022-02-12T17:51:56 | 218,924,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | from datetime import datetime
'''
tag: ^0885 ^medium ^math
name: ^(Spiral Matrix III)
'''
class Solution:
def spiralMatrixIII(self, R: int, C: int, r0: int, c0: int):
res = []
loop_count = max(r0, R - r0, c0, C - c0) + 2
destX, destY = c0, r0
mat = [[0] * C for _ in range(R)]
total = R * C
c = 0
for num in range(1, loop_count):
# 求 east的方向
srcX = destX
srcY = destY
destX = c0 + num if c0 + num < C else C - 1
destY = srcY
for i in range(srcX, destX + 1):
if mat[srcY][i] == 0:
mat[srcY][i] += 1
res.append((srcY, i))
c += 1
if c == total:
break
# 求 south的方向
srcX = destX
srcY = destY
destX = srcX
destY = r0 + num if r0 + num < R else R - 1
for i in range(srcY, destY + 1):
if mat[i][srcX] == 0:
mat[i][srcX] += 1
res.append((i, srcX))
c += 1
if c == total:
break
# west
srcX = destX
srcY = destY
destX = c0 - num if c0 - num >= 0 else 0
destY = srcY
for i in range(srcX, destX - 1, -1):
if mat[srcY][i] == 0:
mat[srcY][i] += 1
res.append((srcY, i))
c += 1
if c == total:
break
# north
srcX = destX
srcY = destY
destX = srcX
destY = r0 - num if r0 - num >= 0 else 0
for i in range(srcY, destY - 1, -1):
if mat[i][srcX] == 0:
mat[i][srcX] += 1
res.append((i, srcX))
c += 1
if c == total:
break
return res
R = 3
C = 3
r0 = 2
c0 = 2
t1 = datetime.now()
s = Solution()
print(s.spiralMatrixIII(R, C, r0, c0))
t2 = datetime.now()
print(t2 - t1)
| [
"hopensic@gmail.com"
] | hopensic@gmail.com |
155930bcbbb9b559f4026ae42f775034e140cbe7 | 4d99350a527a88110b7bdc7d6766fc32cf66f211 | /OpenGLCffi/GL/EXT/KHR/robustness.py | 39eb5aee53fafaf629f399a33917ceedf689edd4 | [
"MIT"
] | permissive | cydenix/OpenGLCffi | e790ef67c2f6c9877badd5c38b7d58961c8739cd | c78f51ae5e6b655eb2ea98f072771cf69e2197f3 | refs/heads/master | 2021-01-11T07:31:10.591188 | 2017-04-17T11:04:55 | 2017-04-17T11:04:55 | 80,312,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | from OpenGLCffi.GL import params
@params(api='gl', prms=[])
def glGetGraphicsResetStatus():
pass
@params(api='gl', prms=['x', 'y', 'width', 'height', 'format', 'type', 'bufSize', 'data'])
def glReadnPixels(x, y, width, height, format, type, bufSize, data):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformfv(program, location, bufSize, params):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformiv(program, location, bufSize, params):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformuiv(program, location, bufSize, params):
pass
@params(api='gl', prms=[])
def glGetGraphicsResetStatusKHR():
pass
@params(api='gl', prms=['x', 'y', 'width', 'height', 'format', 'type', 'bufSize', 'data'])
def glReadnPixelsKHR(x, y, width, height, format, type, bufSize, data):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformfvKHR(program, location, bufSize, params):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformivKHR(program, location, bufSize, params):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformuivKHR(program, location, bufSize, params):
pass
| [
"cdenizol@gmail.com"
] | cdenizol@gmail.com |
b79e99dc04ca2be2a69ca6079afc7b69c0afa6cd | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/Quote18/HQ_18_156.py | 2d711354476219ece70b5d19df540986a8e7b237 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import time
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
class HQ_18_156(xtp_test_case):
def subTickByTick(self, Api, stk_info, case_name, rs_expect):
print Api.GetApiVersion()
def on_all_tick_by_tick(data, error, is_last):
pass
def on_unsub_tick_by_tick(data, error, is_last):
self.print_msg(case_name, rs_expect, error)
Api.setSubTickByTickHandle(on_all_tick_by_tick)
Api.setUnSubscribeTickByTickHandle(on_unsub_tick_by_tick)
Api.SubscribeTickByTick(stk_info)
Api.UnSubscribeTickByTick(stk_info)
time.sleep(1)
def print_msg(self, case_name, rs_expect, error):
if rs_expect == error:
logger.warning('{0}测试正确!'.format(case_name))
else:
logger.error('{0}测试错误!'.format(case_name))
self.assertEqual(error, rs_expect)
def test_HQ_18_156(self):
pyname = 'HQ_18_156'
client_id = 6
Api = XTPQuoteApi(client_id)
Api.Login()
stk_info = {'ticker': '!@#¥%……&×()<>?', 'exchange_id': 2}
self.subTickByTick(Api, stk_info, pyname,
{'error_id': 11200003, 'error_msg': 'unknown security'}) # 5
Api.Logout()
if __name__=='__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
4584b6d202923b7876acb783ea7f94a55dccd0e4 | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/command_lib/storage/file_download_task.py | 55c926f4ed9ddba8c6feb5c5f335d5da0e5ad498 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 2,211 | py | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task for file downloads.
Typically executed in a task iterator:
googlecloudsdk.command_lib.storage.task_executor.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.storage import api_factory
from googlecloudsdk.command_lib.storage import storage_url
from googlecloudsdk.command_lib.storage import task
from googlecloudsdk.core.util import files
class FileDownloadTask(task.Task):
"""Represents a command operation triggering a file download.
Attributes:
destination_local_path (str): The local filesystem path to write the file
to.
source_object_reference (resource_reference.ObjectReference): Must
contain the full path of object to download, including bucket.
Directories will not be accepted.
"""
def __init__(self, destination_local_path, source_object_reference):
super(FileDownloadTask, self).__init__()
self.download_stream = files.FileWriter(destination_local_path)
cloud_url = storage_url.CloudUrl.from_url_string(
source_object_reference.storage_url.url_string)
self.provider = cloud_url.scheme
self.bucket_name = cloud_url.bucket_name
self.object_name = cloud_url.object_name
def execute(self, callback=None):
# TODO(b/162264437): Support all of DownloadObject's parameters.
api_factory.get_api(self.provider).DownloadObject(self.bucket_name,
self.object_name,
self.download_stream)
| [
"gcloud@google.com"
] | gcloud@google.com |
07be9fff7ab78d6a62e09d5b2b53cd61b81cb9aa | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2671/60767/236760.py | 65aabd02d6cd17bfb4abc0f16f21487ef1587e78 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | def has11(num):
while(num>=1):
if(num%2==1):
num = num>>1
if(num%2==1):
return True
num = num>>1
return False
numOfTests = int(input())
Tests = []
for i in range(0,numOfTests):
Tests.append(int(input()))
for test in Tests:
temp = []
cnt = 0
for i in range(0,test):
temp.append("1")
s = "".join(temp)
maxNum = int(s,base=2)
for x in range(1,maxNum+1):
if(has11(x)):
cnt = cnt+1
print(cnt) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
34d0c59dbfbfa437e013e76dcd0fc29661a04b84 | 1259ee2a27cbb2d7de3e034159957d6043161add | /tests/roots/test-ext-autodoc/conf.py | 9f026eb8deab5ef78ede37824b69d322c6f75fc5 | [
"MIT",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | balabit-deps/balabit-os-7-sphinx | f7b0ad4967418f074e8876cd8c7f4a7f5cfbe5d3 | 4e18ca37f4ddddf346c0b30835a544db20887259 | refs/heads/master | 2020-04-07T09:14:11.757278 | 2018-04-11T21:10:19 | 2018-07-20T22:59:13 | 158,244,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | import sys, os
sys.path.insert(0, os.path.abspath('.'))
extensions = ['sphinx.ext.autodoc']
# The suffix of source filenames.
source_suffix = '.rst'
autodoc_mock_imports = [
'dummy'
]
nitpicky = True
| [
"testbot@balabit.com"
] | testbot@balabit.com |
80d765845dcafee702bed7550ecb6e0196682ee9 | 30ac2f9831ebd33885a6f48d153356c2e3731c26 | /Python_Stack/flask/playground_project/server.py | 12ab16b5079218d55d52fb8fb967d84a7c922824 | [] | no_license | pharaoht/Coding-Dojo-Projects | 192cfd8c36b6dadb049e81d31bd780c7ab340d1e | 504f71acbac3c006cf866a08aea0566058f81ce2 | refs/heads/master | 2023-05-11T21:09:17.316257 | 2021-06-08T00:54:09 | 2021-06-08T00:54:09 | 334,003,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | from flask import Flask, render_template
app = Flask(__name__)
@app.route("/play")
def showboxes():
return render_template("index1.html")
@app.route("/play/<num>")
def showboxes1(num):
return render_template("index.html", num=int(num))
@app.route("/play/<num>/<color>")
def showboxes2(num, color):
return render_template("index3.html", num=int(num), color=color)
if __name__ == "__main__":
app.run(debug=True)
| [
"pharaohmanson@gmail.com"
] | pharaohmanson@gmail.com |
8a903a879f726b3a88d3a2e6e5a2b06370843dcb | 9aa85999021da96ce0a7d76789c1298d174d1835 | /blogs/migrations/0076_auto_20200128_1850.py | 649f921de7e73d9b82215f179e7f222baced5e4f | [] | no_license | m0bi5/ISTE-NITK_Website | 20b83a3a629836c33c7478c0af834f6f57e0e907 | 2e186bb1ba457c930f9b691cc5a5584b8e3c270c | refs/heads/master | 2022-11-24T03:02:49.354491 | 2020-07-24T15:43:44 | 2020-07-24T15:43:44 | 184,452,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # Generated by Django 2.2.4 on 2020-01-28 18:50
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogs', '0075_auto_20200128_1844'),
]
operations = [
migrations.AlterField(
model_name='bloghits',
name='created',
field=models.DateTimeField(default=datetime.datetime(2020, 1, 28, 18, 50, 19, 836941)),
),
]
| [
"amodhshenoy@gmail.com"
] | amodhshenoy@gmail.com |
55e8252223a59f65395fe1c7789fd74a8d4a78ba | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02792/s894845946.py | a0b1659006571d0d9285032ffceafc05e79611c9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | # import sys
# sys.setrecursionlimit(10 ** 6)
def cmb(n, r):
import math
if n < r:
return 0
return math.factorial(n) // (math.factorial(n - r) * math.factorial(r))
# from decorator import stop_watch
#
#
# @stop_watch
def solve(N):
ABs = {}
ABs_visited = {}
for h in range(1, 10):
for t in range(1, 10):
ABs.setdefault((h, t), 0)
ABs_visited.setdefault((h, t), False)
for n in range(1, N + 1):
s = str(n)
h, t = int(s[0]), int(s[-1])
if not (h == 0 or t == 0):
ABs[(h, t)] += 1
ans = 0
for k in ABs:
if ABs_visited[k]:
continue
h, t = k
ABs_visited[(h, t)] = True
# ABs_visited[(t, h)] = True
# if h == t:
# ans += ABs[(h, t)] + cmb(ABs[(h, t)], 2)
# else:
# ans += ABs[(h, t)] * ABs[(t, h)]
ans += ABs[(h, t)] * ABs[(t, h)]
# print(k, ABs[k], ans)
# input()
# print(ABs)
print(ans)
if __name__ == '__main__':
N = int(input())
solve(N)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d998ef3b950460ec243b2d468ca72164ed6addba | 7fdefad804586192915fc298a63db136c0863995 | /wxPython in Action/wxPythonInAction-src/Chapter-15/tree_simple.py | b9863e92018508c953ddaf7a65d656e00e8287f4 | [] | no_license | typ0520/python_ebook | 2ca948937e2f390a4e4c2ac57f6cd3124ab507a0 | 9abda102b9f245178b61bf9ffca0e633ad96fec1 | refs/heads/master | 2021-06-20T14:14:22.813999 | 2017-08-14T07:31:41 | 2017-08-14T07:31:41 | 98,658,811 | 0 | 1 | null | 2017-07-28T14:43:55 | 2017-07-28T14:43:55 | null | UTF-8 | Python | false | false | 1,913 | py | import wx
import data
class TestFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, title="simple tree", size=(400,500))
# Create the tree
self.tree = wx.TreeCtrl(self)
# Add a root node
root = self.tree.AddRoot("wx.Object")
# Add nodes from our data set
self.AddTreeNodes(root, data.tree)
# Bind some interesting events
self.Bind(wx.EVT_TREE_ITEM_EXPANDED, self.OnItemExpanded, self.tree)
self.Bind(wx.EVT_TREE_ITEM_COLLAPSED, self.OnItemCollapsed, self.tree)
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged, self.tree)
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnActivated, self.tree)
# Expand the first level
self.tree.Expand(root)
def AddTreeNodes(self, parentItem, items):
"""
Recursively traverses the data structure, adding tree nodes to
match it.
"""
for item in items:
if type(item) == str:
self.tree.AppendItem(parentItem, item)
else:
newItem = self.tree.AppendItem(parentItem, item[0])
self.AddTreeNodes(newItem, item[1])
def GetItemText(self, item):
if item:
return self.tree.GetItemText(item)
else:
return ""
def OnItemExpanded(self, evt):
print "OnItemExpanded: ", self.GetItemText(evt.GetItem())
def OnItemCollapsed(self, evt):
print "OnItemCollapsed:", self.GetItemText(evt.GetItem())
def OnSelChanged(self, evt):
print "OnSelChanged: ", self.GetItemText(evt.GetItem())
def OnActivated(self, evt):
print "OnActivated: ", self.GetItemText(evt.GetItem())
app = wx.PySimpleApp(redirect=True)
frame = TestFrame()
frame.Show()
app.MainLoop()
| [
"jason_yao@htc.com"
] | jason_yao@htc.com |
475fa382a546505487a35004c7fe5da4a68bd9ac | 208baab269ddffab1a93e7dc70b052d07bf50560 | /hood/migrations/0004_editor.py | 363a783e7d71c25eefd44a4b578ec7916b9cdc00 | [] | no_license | marysinaida/Neighborhood | a1035f09515ae9a24bed74ddf1263e06db134c94 | a285df5528bb99d6cb69f9ab41e320682422fe9d | refs/heads/master | 2020-12-13T23:29:18.148498 | 2020-01-21T15:04:53 | 2020-01-21T15:04:53 | 234,562,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-01-21 10:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('hood', '0003_auto_20200121_1312'),
]
operations = [
migrations.CreateModel(
name='Editor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=254)),
],
),
]
| [
"marydorcassinaida54@gmail.com"
] | marydorcassinaida54@gmail.com |
8db6c287bc1a3eac410d8592ce97a1a24f13b860 | 3ee1bb0d0acfa5c412b37365a4564f0df1c093fb | /ml/m14_pipeline2_4_boston.py | cb55ea1a8f86bd861a41b7075007639d2c8f3b1b | [] | no_license | moileehyeji/Study | 3a20bf0d74e1faec7a2a5981c1c7e7861c08c073 | 188843c6415a4c546fdf6648400d072359d1a22b | refs/heads/main | 2023-04-18T02:30:15.810749 | 2021-05-04T08:43:53 | 2021-05-04T08:43:53 | 324,901,835 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,655 | py | # Pipeline, make_pipeline
# 모델 비교
# 2번부터 RandomForest 모델 사용
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.pipeline import Pipeline, make_pipeline # concatenate와 Concatenate의 차이와 같음
# 모델 import
from sklearn.svm import LinearSVC, SVC
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
# 1. 데이터
dataset = load_boston()
x = dataset.data
y = dataset.target
# 전처리
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size = 0.8, random_state = 120, shuffle = True)
# Pipeline 사용시 필요 없음
# scaler = MinMaxScaler()
# scaler.fit(x_train)
# x_train = scaler.transform(x_train)
# x_test = scaler.transform(x_test)
# 2. 모델구성
# ====================================================================Pipeline
# Pipeline, make_pipeline : 전처리와 모델을 연결(통로)
# 별도 MinMaxScaler 필요없음
scalers = np.array([MinMaxScaler(), StandardScaler()])
for scaler in scalers:
print('==========================',scaler)
model_Pipeline = Pipeline([('scaler', scaler), ('malddong', RandomForestRegressor())])
model_make_pipeline = make_pipeline(scaler, RandomForestRegressor())
# 3. 훈련
model_Pipeline.fit(x_train, y_train)
model_make_pipeline.fit(x_train, y_train)
# 4. 평가
results1 = model_Pipeline.score(x_test, y_test)
results2 = model_make_pipeline.score(x_test, y_test)
print('model_Pipeline의 score : ', results1)
print('model_make_pipeline의 score : ', results2)
'''
1. Tensorflow :
CNN모델 r2 : 0.9462232137123261
2. RandomForest모델 :
============================================GridSearchCV
최종 정답률 : 0.8571954130553036
34.47초 걸렸습니다
============================================RandomizedSearchCV
최종 정답률 : 0.8542102932416746
13.23초 걸렸습니다
3. RandomForest모델, Pipeline() :
========================== MinMaxScaler()
model_Pipeline의 score : 0.8513337126169909
model_make_pipeline의 score : 0.8513337126169909
========================== StandardScaler()
model_Pipeline의 score : 0.8471314230423943
model_make_pipeline의 score : 0.8471314230423943
'''
| [
"noreply@github.com"
] | moileehyeji.noreply@github.com |
fea6c22074c77c9e5682f20c035d3b881dfa6d4f | ed218f5ea54eac34743f22596eae60242bb73004 | /backend/chat/admin.py | 5e83fe01bb146e7b8ebb6050982a7647e1960b63 | [] | no_license | crowdbotics-apps/kids-teach-kids-18288 | 65566d8ae92964c1e482c79883d1bce2bf7ff6f0 | 3b54f77ed6366541a4cd041d85934c8c802b409b | refs/heads/master | 2022-11-05T14:28:05.406100 | 2020-06-20T19:03:20 | 2020-06-20T19:03:20 | 273,767,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | from django.contrib import admin
from .models import (
Message,
ThreadMember,
MessageAction,
ThreadAction,
ForwardedMessage,
Thread,
)
admin.site.register(ThreadAction)
admin.site.register(ForwardedMessage)
admin.site.register(MessageAction)
admin.site.register(Thread)
admin.site.register(ThreadMember)
admin.site.register(Message)
# Register your models here.
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
667f652834d1ed267e8db34154d55671fed9c562 | 5b28005b6ee600e6eeca2fc7c57c346e23da285f | /nomadic_recording_lib/ui/iOSCControl/sessionselect.py | 6669a04c959c24f1afc8377ae5c8dea8ae353723 | [] | no_license | nocarryr/wowza_logparse | c31d2db7ad854c6b0d13495a0ede5f406c2fce3f | d6daa5bf58bae1db48ac30031a845bf975c7d5cc | refs/heads/master | 2021-01-17T07:19:00.347206 | 2017-06-24T16:57:32 | 2017-06-24T16:57:32 | 25,835,704 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | from Bases import OSCBaseObject
import widgets
class SessionSelect(OSCBaseObject):
_Properties = {'selection':dict(type=str, quiet=True)}
def __init__(self, **kwargs):
self.iOsc = kwargs.get('iOsc')
self.client = kwargs.get('client')
kwargs.setdefault('osc_parent_node', self.client.osc_node)
kwargs.setdefault('osc_address', 'SessionSelect')
kwargs.setdefault('ParentEmissionThread', self.iOsc.ParentEmissionThread)
super(SessionSelect, self).__init__(**kwargs)
x = .25
y = .1
w = .25
h = .1
bounds = [x, y, w, h]
self.topwidget = self.iOsc.add_widget('Label',
name='topwidget',
bounds=bounds,
osc_parent_node=self.osc_node,
client=self.client,
value='Select Session')
self.session_btns = {}
sessions = sorted(self.iOsc.comm.osc_io.discovered_sessions.keys())
for i, key in enumerate(sessions):
if key is None:
continue
y += h
bounds = [x, y, w, h]
btn = self.topwidget.add_widget(SessionButton, name=key, index=i, bounds=bounds)
self.session_btns[key] = btn
btn.bind(touch_state=self.on_session_btn_touch)
def unlink(self):
self.topwidget.remove()
super(SessionSelect, self).unlink()
def on_session_btn_touch(self, **kwargs):
state = kwargs.get('value')
btn = kwargs.get('obj')
if state and self.selection is None:
self.selection = btn.name
self.LOG.info(self.selection)
class SessionButton(widgets.Toggle):
def __init__(self, **kwargs):
self.index = kwargs.get('index')
kwargs['label'] = kwargs['name']
super(SessionButton, self).__init__(**kwargs)
| [
"matt@nomadic-recording.com"
] | matt@nomadic-recording.com |
59982cb9f893c43e0cf6038a51750f94da4c4fb5 | 1dd72195bc08460df7e5bb82d3b7bac7a6673f49 | /api/alembic/versions/69cbd7ca2477_add_gfs_prediction_model.py | 856f0767fd5fe67adc724458b639c5b7b9e2571f | [
"Apache-2.0",
"MIT"
] | permissive | bcgov/wps | c4347c39cadfad6711502d47776abc8d03895593 | 0ba707b0eddc280240964efa481988df92046e6a | refs/heads/main | 2023-08-19T00:56:39.286460 | 2023-08-16T18:03:06 | 2023-08-16T18:03:06 | 235,861,506 | 35 | 9 | Apache-2.0 | 2023-09-11T21:35:07 | 2020-01-23T18:42:10 | Python | UTF-8 | Python | false | false | 573 | py | """Add GFS prediction model
Revision ID: 69cbd7ca2477
Revises: de8355996f8e
Create Date: 2023-02-09 14:30:49.597571
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '69cbd7ca2477'
down_revision = 'de8355996f8e'
branch_labels = None
depends_on = None
def upgrade():
op.execute('INSERT INTO prediction_models(name, abbreviation, projection)\
VALUES(\'Global Forecast System\', \'GFS\', \'lonlat.0.5deg\')')
def downgrade():
op.execute('DELETE FROM prediction_models WHERE abbreviation = \'GFS\'')
| [
"noreply@github.com"
] | bcgov.noreply@github.com |
2344ae408834eb1cc85fd232d50f961cdc1f96b4 | 09c97a53c39c83bef52d15db6644a27a3bbf229f | /part01-web/day07~_Python/todoMgrSystem/view/menu_view.py | e8eb413180911442b7d826719825336e9c65ee92 | [] | no_license | wansang93/Cloud_multicampus | aa0372914e28ebdc76f5d683e9886841be5f5891 | 646325cee93d6bcabd4163c7bb099e4e92621e9f | refs/heads/master | 2023-03-27T07:02:41.552663 | 2021-03-24T01:53:07 | 2021-03-24T01:53:07 | 326,610,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,454 | py | from entity.todo import Todo
"""
Define View
"""
### Menu ###
# show menu
def display_menu():
print('==================================================')
print('등록(1) 보기(2) 수정(3) 삭제(4) 모두 삭제(5) 나가기(x)')
# select menu number
def select_menu():
menu = input()
return menu
# 메뉴를 다시 입력하라는 문구
def display_reinput():
print('메뉴를 잘못 입력하였습니다. 다시 입력해 주세요.')
# 메뉴1. 등록하기(Create)
def display_register():
while True:
todo_id = input('id를 입력해 주세요(유니크 값으로): ')
# 사용자 입력 폼에서 에러 체크
if not todo_id.isdecimal():
print('id는 숫자를 입력해 주세요.')
else:
break
while True:
todo_what = input('할 일을 입력해 주세요: ')
# 사용자 입력 폼에서 에러 체크
if not todo_what:
print('공백이 아닌 것으로 입력해 주세요.')
else:
break
return Todo(todo_id, todo_what)
# 메뉴3. 수정하기(Update)
# 해당하는 id가 있는지 확인하기
def check_id_for_update():
while True:
todo_id = input('업데이트 할 id를 입력해 주세요: ')
# 사용자 입력 폼에서 에러 체크
if not todo_id.isdecimal():
print('id는 숫자를 입력해 주세요.')
else:
break
return todo_id
def get_what_for_update():
while True:
todo_what = input('수정 사항을 입력해 주세요: ')
# 사용자 입력 폼에서 에러 체크
if not todo_what:
print('공백이 아닌 것으로 입력해 주세요.')
else:
break
return todo_what
# 메뉴4. 삭제하기(Delete)
def check_id_for_delete():
while True:
todo_id = input('삭제할 할 id를 입력해 주세요: ')
# 사용자 입력 폼에서 에러 체크
if not todo_id.isdecimal():
print('id는 숫자를 입력해 주세요.')
else:
break
return todo_id
# 메뉴5. 전부 삭제하기(Delete All)
def delete_all():
print('메모를 전부 삭제합니다.')
text = input('정말로 실행하시겠습니까? [y/n]: ')
while True:
if text in ['y', 'Y', 'n', 'N']:
break
print('y 또는 n 으로 입력해 주세요.')
return text
| [
"wansang93@naver.com"
] | wansang93@naver.com |
325e2088f1a70378cc88cdf3a96e15fa365b4554 | 35aca1291dae461d5562a3b7484e5f659ee80817 | /oneflow/__main__.py | b0b1d9d18c301e9044ada93b56b5d3610add3d7f | [
"Apache-2.0"
] | permissive | Flowingsun007/oneflow | e6a52cfbf5e82ca4f8b787aa026f40a2f568a10f | c1880c011dd453719a28d880abe15e2dab8d0da1 | refs/heads/master | 2023-05-11T19:18:59.220269 | 2021-05-28T20:10:35 | 2021-05-28T20:10:35 | 372,195,705 | 0 | 0 | Apache-2.0 | 2021-06-02T09:46:51 | 2021-05-30T11:24:37 | null | UTF-8 | Python | false | false | 1,574 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--start_worker", default=False, action="store_true", required=False
)
parser.add_argument("--env_proto", type=str, required=False)
parser.add_argument("--doctor", default=False, action="store_true", required=False)
args = parser.parse_args()
def StartWorker(env_proto):
import oneflow._oneflow_internal
oneflow._oneflow_internal.InitEnv(env_proto)
def main():
start_worker = args.start_worker
if start_worker:
env_proto = args.env_proto
assert os.path.isfile(
env_proto
), "env_proto not found, please check your env_proto path: {}".format(env_proto)
with open(env_proto, "rb") as f:
StartWorker(f.read())
if args.doctor:
import oneflow
print("path:", oneflow.__path__)
print("version:", oneflow.__version__)
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Flowingsun007.noreply@github.com |
c74cf90baafe4882b333e01b28d7a2e85ebfb96b | 130e9ef21397b5263ecaf2923f3a196eba58ef5a | /pyxel/ui/number_picker.py | e7a1d3b6ec2d162aa814b72adab9af68df6dec6c | [
"MIT"
] | permissive | sacredhotdog/pyxel | 29571dd3daef6d813f9fdd833bf55e5ba0af689a | 08da48dbd1ac53c06cf8a383f28d66fd89f78f4a | refs/heads/master | 2020-04-04T16:35:26.370822 | 2018-11-04T09:06:05 | 2018-11-04T09:06:05 | 156,084,070 | 0 | 0 | MIT | 2018-11-04T13:18:19 | 2018-11-04T13:18:19 | null | UTF-8 | Python | false | false | 2,600 | py | import pyxel
from .constants import INPUT_FIELD_COLOR, INPUT_TEXT_COLOR
from .text_button import TextButton
from .widget import Widget
class NumberPicker(Widget):
"""
Events:
__on_change(value)
"""
def __init__(self, parent, x, y, min_value, max_value, value, **kwargs):
self._number_len = max(len(str(min_value)), len(str(max_value)))
width = self._number_len * 4 + 21
height = 7
super().__init__(parent, x, y, width, height, **kwargs)
self._min_value = min_value
self._max_value = max_value
self._value = None
self.dec_button = TextButton(self, x, y, "-")
self.inc_button = TextButton(self, x + width - 7, y, "+")
self.add_event_handler("enabled", self.__on_enabled)
self.add_event_handler("disabled", self.__on_disabled)
self.add_event_handler("draw", self.__on_draw)
self.dec_button.add_event_handler("press", self.__on_dec_button_press)
self.dec_button.add_event_handler("repeat", self.__on_dec_button_press)
self.inc_button.add_event_handler("press", self.__on_inc_button_press)
self.inc_button.add_event_handler("repeat", self.__on_inc_button_press)
self.value = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if self._value != value:
self._value = value
self.call_event_handler("change", value)
self.dec_button.is_enabled = self._value != self._min_value
self.inc_button.is_enabled = self._value != self._max_value
def __on_enabled(self):
self.dec_button.is_enabled = self._value != self._min_value
self.inc_button.is_enabled = self._value != self._max_value
def __on_disabled(self):
self.dec_button.is_enabled = False
self.inc_button.is_enabled = False
def __on_draw(self):
x1 = self.x
y1 = self.y
x2 = self.x + self.width - 1
y2 = self.y + self.height - 1
pyxel.rect(x1 + 9, y1, x2 - 9, y2, INPUT_FIELD_COLOR)
pyxel.text(
self.x + 11,
self.y + 1,
("{:>" + str(self._number_len) + "}").format(self._value),
INPUT_TEXT_COLOR,
)
def __on_dec_button_press(self):
offset = 10 if pyxel.btn(pyxel.KEY_SHIFT) else 1
self.value = max(self._value - offset, self._min_value)
def __on_inc_button_press(self):
offset = 10 if pyxel.btn(pyxel.KEY_SHIFT) else 1
self.value = min(self._value + offset, self._max_value)
| [
"takashi.kitao@gmail.com"
] | takashi.kitao@gmail.com |
268eb15f65ebda9c888438d11aa9f83830e6d243 | 25ba5fb4e2d02d6949f85ca49e11a70d3960432d | /lib/python/treadmill/alert/__init__.py | 9fef9e3d07e7fbee2c8270059253dff919d686f5 | [
"Apache-2.0"
] | permissive | crazyrex/treadmill | 9c007967db9470685b2d11e2232ad9926b47733e | 75be287a808a4cbdacab67b3f62a3cb3eb1eab67 | refs/heads/master | 2020-03-31T03:55:39.713552 | 2018-10-04T15:28:36 | 2018-10-04T15:28:36 | 151,884,550 | 1 | 0 | Apache-2.0 | 2018-10-06T21:50:04 | 2018-10-06T21:50:03 | null | UTF-8 | Python | false | false | 1,360 | py | """Treadmill alert module.
"""
import io
import json
import os.path
import time
from treadmill import fs
def create(alerts_dir,
epoch_ts=None,
instanceid=None,
summary=None,
type_=None,
**alert_data):
"""Create a file in alerts_dir representing the alert.
"""
if not epoch_ts:
epoch_ts = time.time()
alert_data.update(
{
'epoch_ts': epoch_ts,
'instanceid': instanceid,
'summary': summary,
'type_': type_,
}
)
fs.write_safe(
os.path.join(alerts_dir, _to_filename(instanceid, type_)),
lambda f: f.write(
json.dumps(alert_data, indent=4).encode()
),
prefix='.tmp',
permission=0o644
)
def _to_filename(instanceid, type_):
"""Returns a host wide unique filename for the alert.
Alerts sorted alphabetically result in chronological order.
"""
return '{:f}-{}-{}'.format(
time.monotonic(), instanceid, type_
).replace(os.path.sep, '_')
def read(filename, alerts_dir=None):
"""Return the alert stored in the file.
"""
if alerts_dir is not None:
filename = os.path.join(alerts_dir, filename)
with io.open(filename, 'rb') as file_:
alert = json.loads(file_.read().decode())
return alert
| [
"ceache@users.noreply.github.com"
] | ceache@users.noreply.github.com |
6d3769cc470c8fe3b6958de2bbaec474c6edbc6b | ec68eee1abe0f900210c2bad51b64fb8a1053d5d | /fullerene/config.py | f1f81f62399499bb184b1eb86504f3882803b09d | [] | no_license | bitprophet/fullerene | 36a011eebf1ef1a14f963ed8101334c608757b92 | edb9afe6c07c9d610dfa8630142abb96382ff0c1 | refs/heads/master | 2020-05-18T20:27:56.612256 | 2012-02-11T00:14:52 | 2012-02-11T00:14:52 | 2,579,486 | 15 | 1 | null | 2017-12-11T17:30:57 | 2011-10-14T23:37:06 | Python | UTF-8 | Python | false | false | 2,804 | py | import yaml
from graphite import Graphite
from metric import Metric
class Config(object):
def __init__(self, text):
# Load up
config = yaml.load(text)
# Required items
try:
try:
exclude_hosts = config['hosts']['exclude']
except KeyError:
exclude_hosts = []
self.graphite = Graphite(
uri=config['graphite_uris']['internal'],
exclude_hosts=exclude_hosts
)
except KeyError:
raise ValueError, "Configuration must specify graphite_uris: internal"
# Optional external URL (for links)
self.external_graphite = config['graphite_uris'].get('external', None)
# 'metrics' section
self.metrics = {}
for name, options in config.get('metrics', {}).iteritems():
self.metrics[name] = Metric(
options=options,
config=self,
name=name
)
# Metric groups
self.groups = {}
for name, metrics in config.get('metric_groups', {}).iteritems():
if name not in self.groups:
self.groups[name] = {}
for item in metrics:
self.groups[name][item] = self.parse_metric(item)
# 'collections'
self.collections = config.get('collections', {})
for collection in self.collections.values():
# Instantiate metrics where needed
for group in collection['groups'].values():
group['metrics'] = map(self.parse_metric, group['metrics'])
if 'overview' in group:
group['overview'] = map(
self.parse_metric,
group['overview'][:]
)
# Default graph args
self.defaults = config.get('defaults', {})
# Timeperiod aliases
self.periods = config.get('periods', {})
def parse_metric(self, item):
exists = False
try:
exists = item in self.metrics
except TypeError:
pass
# Name + name already exists as a metric alias == use that
if exists:
metric = self.metrics[item]
else:
# String == metric path == make new metric from it
if isinstance(item, basestring):
metric = Metric({'path': item}, config=self, name=item)
# Non-string == assume hash/dict == make metric from that (assumes
# one-item dict, name => metric)
else:
name, value = item.items()[0]
metric = Metric(name=name, config=self, options=value)
return metric
@property
def metric_groups(self):
return sorted(self.groups)
| [
"jeff@bitprophet.org"
] | jeff@bitprophet.org |
5b8a25a8b5efba5420da7435bb20a6ce21dac8c9 | a0f0efaaaf69d6ccdc2a91596db29f04025f122c | /install/nav_2d_msgs/lib/python2.7/dist-packages/nav_2d_msgs/msg/_Twist2D.py | d9e163e152623a1775bb41432708d59da2817b86 | [] | no_license | chiuhandsome/ros_ws_test-git | 75da2723154c0dadbcec8d7b3b1f3f8b49aa5cd6 | 619909130c23927ccc902faa3ff6d04ae0f0fba9 | refs/heads/master | 2022-12-24T05:45:43.845717 | 2020-09-22T10:12:54 | 2020-09-22T10:12:54 | 297,582,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,788 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from nav_2d_msgs/Twist2D.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Twist2D(genpy.Message):
_md5sum = "938fa65709584ad8e77d238529be13b8"
_type = "nav_2d_msgs/Twist2D"
_has_header = False # flag to mark the presence of a Header object
_full_text = """float64 x
float64 y
float64 theta
"""
__slots__ = ['x','y','theta']
_slot_types = ['float64','float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
x,y,theta
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Twist2D, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
if self.theta is None:
self.theta = 0.
else:
self.x = 0.
self.y = 0.
self.theta = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.theta))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 24
(_x.x, _x.y, _x.theta,) = _get_struct_3d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.theta))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 24
(_x.x, _x.y, _x.theta,) = _get_struct_3d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3d = None
def _get_struct_3d():
global _struct_3d
if _struct_3d is None:
_struct_3d = struct.Struct("<3d")
return _struct_3d
| [
"chiuhandsome1966@gmail.com"
] | chiuhandsome1966@gmail.com |
0b82641f368069e443e83c50231fbc5e08c0a609 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02584/s013302359.py | f67303454d5f33e7cc48ea5bafc8491a565b7b35 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | x, k, d = map(int, input().split())
cur = abs(x)
rem = k
cnt = min(cur // d, k)
cur = cur - d * cnt
rem = rem - cnt
if rem > 0:
if rem % 2 == 1:
cur = cur - d
ans = abs(cur)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c1767d932fff8f34fbd63518d4b75eae263689f5 | 3a8cb8a3639cee11ec5e9c8e4a0a5f940b711dec | /tests/repos/converters.py | 8510d4d3b9760df9ba3b12a1f22052591b31504e | [
"Apache-2.0"
] | permissive | pofatu/pygeoroc | f35686e19dc327159ce4285e18e2590e91332a23 | c722da35ab36f2fdcc3d793a025bb81d217238e1 | refs/heads/master | 2022-07-09T15:37:31.134567 | 2022-06-29T12:52:34 | 2022-06-29T12:52:34 | 253,836,063 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from pygeoroc.errata import CONVERTERS
FIELDS = {
'LAND_OR_SEA': CONVERTERS.upper,
}
COORDINATES = {
'NEW_CALEDONIA.csv': {
'latitude': CONVERTERS.negative,
'longitude': CONVERTERS.positive,
}
}
| [
"xrotwang@googlemail.com"
] | xrotwang@googlemail.com |
2148763f39be962a4ae2cb0e8be40e490b756cb2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_155/3117.py | 4b41befbf837cdaf9d873331ac63d00b40d7c5e5 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | infile = open('A-large.in', 'r')
outfile = open('out.txt', 'w')
cases = int(infile.readline())
for i in range(1, cases+1):
line = infile.readline().split()
s_max = int(line[0])
audience = line[1]
standing = 0
ans = 0
if s_max != 0:
for j in range(0, s_max + 1):
if standing >= j:
standing += int(audience[j])
else:
invites = j - standing
ans += invites
standing += int(audience[j]) + invites
outfile.write("Case #" + str(i) + ": " + str(ans) + "\n")
infile.close()
outfile.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
968ffe2bd211dc6988e1de691ba4a0c09270e96c | 5df4d172df0bc6b6c8e021e44a0cfa50a6b01251 | /src/sanic/run_websocket_server.py | bd4e50f51c4efe2153f03012dfb1290d89996a43 | [
"MIT"
] | permissive | kingking888/MocaBliveAPI | 225ba33663f41c08ac358e5b138c57e26381d8f0 | 205bf4eec2becd0bf5a5a64f5d98718a73f51543 | refs/heads/master | 2022-10-18T08:59:53.301603 | 2020-06-08T10:59:08 | 2020-06-08T10:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,401 | py | # Ω*
# ■ ■■■■■
# ■ ■■ ■■
# ■ ■■ ■
# ■ ■■
# ■■■■■ ■ ■■■
# ■■ ■■ ■ ■■■
# ■■ ■■ ■ ■■■■
# ■■ ■■ ■ ■■■■
# ■■■■■■■■■ ■ ■■■
# ■■ ■ ■■
# ■■ ■ ■■
# ■■ ■ ■ ■■ ■■
# ■■ ■■ ■ ■■■ ■■■ ■■
# ■■■■■ ■ ■■■ ■■■■■
"""
Copyright (c) 2020.1.17 [el.ideal-ideas]
This software is released under the MIT License.
see LICENSE.txt or following URL.
https://www.el-ideal-ideas.com/MocaLog/LICENSE/
"""
# -- Imports --------------------------------------------------------------------------
from .. import core
from typing import Optional
from ssl import SSLContext
from sanic import Sanic
from socket import AF_INET6, SOCK_STREAM, socket
from sanic.websocket import WebSocketProtocol
# -------------------------------------------------------------------------- Imports --
# -- Run --------------------------------------------------------------------------
def run_websocket_server(app: Sanic,
ssl: Optional[SSLContext],
host: str,
port: int,
access_log: bool = False,
debug: bool = False,
use_ipv6: bool = False,
workers=1) -> None:
"""Run Sanic server."""
try:
if use_ipv6:
sock = socket(AF_INET6, SOCK_STREAM)
sock.bind((host, port))
app.run(sock=sock,
access_log=access_log,
ssl=ssl,
debug=debug,
workers=workers,
protocol=WebSocketProtocol)
else:
app.run(host=host,
port=port,
access_log=access_log,
ssl=ssl,
debug=debug,
workers=workers,
protocol=WebSocketProtocol)
except OSError as os_error:
core.print_warning(f'Sanic Websocket Server stopped. Please check your port is usable. <OSError: {os_error}>')
except Exception as other_error:
core.print_warning(f'Sanic Websocket Server stopped, unknown error occurred. <Exception: {other_error}>')
finally:
if use_ipv6:
sock.close()
# -------------------------------------------------------------------------- Run --
| [
"el.idealideas@gmail.com"
] | el.idealideas@gmail.com |
4cc4a9f6af19cb45b334d02cac35add8080a7c70 | 39bae52d75b501f6db49b05480d5bb185c2370be | /my_work/btv/btv舆情早前版本/btv舆情_非精简版/btv_舆情/read_WEIBO_USER.py | dc1105b64aa42fb718e430a61e8f295129d1d165 | [] | no_license | callmeivy/Pycharm_Project | 65c53de17a3902c565e13c8b69d134daf97ba6f8 | ec1637683ee1f9d6d3f3533d9de9e913eb779898 | refs/heads/master | 2021-08-23T07:11:11.542918 | 2017-12-04T02:27:44 | 2017-12-04T02:27:44 | 112,572,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,220 | py | #coding=utf-8
import sys,os
import MySQLdb
import time
from collections import Counter
reload(sys)
sys.setdefaultencoding('utf8')
import datetime
import requests
import json
import base64
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
import requests
def issuccessful(request):
if 200 <= request.status_code and request.status_code <= 299:
return True
else:
return False
def mentioned_trend(baseurl,mysqlhostIP, mysqlUserName = 'root', mysqlPassword = '', dbname = 'btv_v2'):
list_key_words = list()
# 存储评论数据
# 连接数据库
print(base64.b64decode(b'Q29weXJpZ2h0IChjKSAyMDEyIERvdWN1YmUgSW5jLiBBbGwgcmlnaHRzIHJlc2VydmVkLg==').decode())
sqlConn=MySQLdb.connect(host=mysqlhostIP, user=mysqlUserName, passwd=mysqlPassword, db = dbname, charset='utf8')
sqlcursor = sqlConn.cursor()
sqlcursor.execute('''CREATE TABLE IF NOT EXISTS key_kk_buzz(pk bigint NOT NULL PRIMARY KEY AUTO_INCREMENT, keywords varchar(50)) DEFAULT CHARSET=utf8;''')
print '新建库成功'
os.popen('kinit -k -t /home/ctvit/ctvit.keytab ctvit')
kerberos_auth = HTTPKerberosAuth(mutual_authentication=OPTIONAL)
tablename = "DATA:WEIBO_USER"
r = requests.get(baseurl + "/" + tablename + "/*", auth=kerberos_auth, headers = {"Accept" : "application/json"})
if issuccessful(r) == False:
print "Could not get messages from HBase. Text was:\n" + r.text
# quit()
bleats = json.loads(r.text)
box = list()
for row in bleats['Row']:
# print 000
# count+=1
message = ''
lineNumber = ''
username = ''
for cell in row['Cell']:
columnname = base64.b64decode(cell['column'])
value = cell['$']
if value == None:
print 'none'
continue
if columnname == "base_info:screen_name":
key_word = base64.b64decode(value)
print 'll', key_word
if key_word == '罗旭':
print 'ok'
# if ("北京卫视春晚" not in key_word) and ("北京台的春晚" not in key_word) and ("BTV春晚" not in key_word) and ("BTV春晚" not in key_word) and ("bTV春晚" not in key_word):
# break
# if columnname == "base_info:cdate":
# cdate = base64.b64decode(value)
# cdate = cdate.split('T')[0]
# print 'date',cdate
# if cdate not in box:
# box.append(cdate)
# for i in box:
# print i
# print 'ppp',type(key_word)
# print '11',key_word
# if key_word not in list_key_words:
# list_key_words.append(key_word)
#
# tempData = []
# for i in list_key_words:
# print 'key',i
# tempData.append(str(i))
# sqlcursor.execute('''insert into key_kk_buzz(keywords) values (%s)''',tempData)
# sqlConn.commit()
# tempData = []
sqlConn.close()
# print "key_words", i
if __name__=='__main__':
commentTest = mentioned_trend(baseurl = "http://172.28.12.34:8080", mysqlhostIP = '172.28.34.16', dbname = 'btv_v2')
| [
"c.ivy.jin@foxmail.com"
] | c.ivy.jin@foxmail.com |
52cd9f391400b90fae34f5b8dd6fd2c5e3a667c6 | 5c7fb0e2f3bc498351ba4c57247ec1637da57e21 | /python/mpi/enum.py | f4b091573d29406aba69119a5c793a4e9555f7b0 | [] | no_license | shixing/myLib | 35180e479f3701c07894f829b8f495594d23a225 | d4557fe0f07543ba588a7464b6efafebac1284a5 | refs/heads/master | 2020-05-19T13:34:34.022281 | 2017-04-10T01:08:06 | 2017-04-10T01:08:06 | 19,991,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | # copied from mpi4py-examples/09-task-pull.py
def enum(*sequential, **named):
"""
Handy way to fake an enumerated type in Python
"""
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
tags = enum('READY', 'DONE', 'EXIT', 'START')
tags.READY # == 0
tags.READY == tags.DONE # == False | [
"shixing19910105@gmail.com"
] | shixing19910105@gmail.com |
4188530b0c1b69052fa56920b558dc66dab2cbe9 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/b7db18784e744fa39ebc6a5d607b0289.py | 9405afec6249a56d672a92e52e9a8340f2f34a94 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 327 | py | def hey(content):
"""
Simple function which passes tests ran from bob_test.py.
"""
text = content.strip()
if text.isupper():
return "Whoa, chill out!"
elif text[-1:] == "?":
return "Sure."
elif len(text) == 0:
return "Fine. Be that way!"
else:
return "Whatever."
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
1d46b3c0287a5603be16485830b47c7a717b1e70 | cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde | /P/PathSum.py | f4df2af0815725df3b70e8818ba0d2c397a4b0b4 | [] | no_license | bssrdf/pyleet | 8861bbac06dfe0f0f06f6ad1010d99f8def19b27 | 810575368ecffa97677bdb51744d1f716140bbb1 | refs/heads/master | 2023-08-20T05:44:30.130517 | 2023-08-19T21:54:34 | 2023-08-19T21:54:34 | 91,913,009 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,712 | py | '''
-Easy-
Given the root of a binary tree and an integer targetSum, return true if the
tree has a root-to-leaf path such that adding up all the values along the path
equals targetSum.
A leaf is a node with no children.
Example 1:
Input: root = [5,4,8,11,null,13,4,7,2,null,null,null,1], targetSum = 22
Output: true
Example 2:
Input: root = [1,2,3], targetSum = 5
Output: false
Example 3:
Input: root = [1,2], targetSum = 0
Output: false
Constraints:
The number of nodes in the tree is in the range [0, 5000].
-1000 <= Node.val <= 1000
-1000 <= targetSum <= 1000
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from BinaryTree import (TreeNode, null, constructBinaryTree)
class Solution(object):
def hasPathSum(self, root, targetSum):
"""
:type root: TreeNode
:type targetSum: int
:rtype: bool
"""
if not root: return False
if not root.left and not root.right:
if targetSum == root.val: return True
else: return False
if root.left and self.hasPathSum(root.left, targetSum-root.val):
return True
if root.right and self.hasPathSum(root.right, targetSum-root.val):
return True
return False
if __name__ == "__main__":
root = constructBinaryTree([5,4,8,11,null,13,4,7,2,null,null,null,1])
print(Solution().hasPathSum(root, 22))
root = constructBinaryTree([1, 2, 3])
print(Solution().hasPathSum(root, 5))
root = constructBinaryTree([1, 2, null])
print(Solution().hasPathSum(root, 1))
| [
"merlintiger@hotmail.com"
] | merlintiger@hotmail.com |
2d2c15b57889aca91355673475220e7fce3cd17b | e20d947696ffb2422c5856ca1067c9a068705a82 | /day011/hw_004_元组.py | 190b4ecb34df475476aae193895ba6f42c218bd8 | [] | no_license | lxgzhw520/ZhangBaofu | 27aed80446c687890e17c35a2bc5c93a5b75462e | 401bae4b2d8e1a284d281cc29b5ed31a4e2039a4 | refs/heads/master | 2020-05-07T16:14:06.627708 | 2019-04-21T06:20:26 | 2019-04-21T06:20:26 | 180,673,589 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | # _*_ coding:UTF-8 _*_
# 开发人员: 理想国真恵玩-张大鹏
# 开发团队: 理想国真恵玩
# 开发时间: 2019-04-12 08:44
# 文件名称: hw_004_元组.py
# 开发工具: PyCharm
# 元组和列表类似 不过列表是用()定义的 值不可修改
t = () # 定义一个空元组
print(type(t))
t1 = (1,) # 定义一个元素的列表
print(type(t1)) # type用来打印一个数据的类型
t2 = (1) # 注意一个元素必须加逗号,否则不是元组类型
print(type(t2))
# 元组推导式 这种方法是不行的
t = (i for i in range(10) if i % 2 == 0)
print(t)
print(type(t))
# 要快速生成元组,需要先生成列表,再将列表转换为元组
# 注意这里的推导式 加了一个判断条件
# 格式[元素 元素取值 元素取值的过滤条件]
t = tuple([i for i in range(10) if i % 2 == 0])
print(t)
print(type(t))
# 适用于列表的方法,一般也适用于元组
print('--' * 22)
# 长度
print(len(t))
# 统计
print(t.count(2))
# 索引
print(t.index(2))
# 排序 是不可用的
# t.sort()
# 反转 也不可用
# t.reverse()
# 访问 用索引
print(t[1])
| [
"zhangdapeng@zhangdapengdeMacBook-Pro.local"
] | zhangdapeng@zhangdapengdeMacBook-Pro.local |
adda1694dfde2bc1d6bbc99f30c38673d8050ccf | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/api/paddlebase/test_fill_diagonal_.py | 93120e73f50302acf34f960d7acf4c341ceb6a23 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 4,229 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test_fill_diagonal_
"""
import pytest
import numpy as np
import paddle
import paddle.device as device
# global params
types = [np.float32, np.float64, np.int32, np.int64]
if device.is_compiled_with_cuda() is True:
places = [paddle.CPUPlace(), paddle.CUDAPlace(0)]
else:
# default
places = [paddle.CPUPlace()]
def fill_diagonal_base(x, value, offset=0, warp=False):
"""
api calculate
"""
outputs, gradients = [], []
for place in places:
for t in types:
paddle.disable_static(place)
y = x.astype(t)
y = paddle.to_tensor(y)
y.stop_gradient = False
y = y * 2
y.retain_grads()
out = paddle.Tensor.fill_diagonal_(y, value, offset, warp)
outputs.append(out.numpy())
loss = paddle.sum(out)
loss.backward()
gradients.append(y.grad.numpy())
return outputs, gradients
@pytest.mark.api_base_fill_diagonal_vartype
def test_fill_diagonal_base():
"""
base
"""
x = np.zeros((3, 3))
out, grad = fill_diagonal_base(x, 1)
res_out = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
res_grad = np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0]])
length = len(out)
for i in range(length):
assert np.allclose(out[i], res_out)
assert np.allclose(grad[i], res_grad)
@pytest.mark.api_base_fill_diagonal_parameters
def test_fill_diagonal_0():
"""
default: wrap = False
"""
x = np.zeros((5, 3))
out, grad = fill_diagonal_base(x, 1)
res_out = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
res_grad = np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
assert np.allclose(out[0], res_out)
assert np.allclose(grad[0], res_grad)
@pytest.mark.api_base_fill_diagonal_parameters
def test_fill_diagonal_1():
"""
offset = 1
value = 4
"""
x = np.zeros((3, 3))
out, grad = fill_diagonal_base(x, 4, offset=1)
res_out = np.array([[0.0, 4.0, 0.0], [0.0, 0.0, 4.0], [0.0, 0.0, 0.0]])
res_grad = np.array([[1.0, 0.0, 1.0], [1.0, 1.0, 0.0], [1.0, 1.0, 1.0]])
assert np.allclose(out[0], res_out)
assert np.allclose(grad[0], res_grad)
# @pytest.mark.api_base_fill_diagonal_parameters
# def test_fill_diagonal_2():
# """
# offset = -1
# value = -4
# """
# x = np.zeros((3, 3))
# out, grad = fill_diagonal_base(x, -4, offset=-1)
# res_out = np.array([[0., 0., 0.],
# [-4., 0., 0.],
# [0., -4., 0.]])
# res_grad = np.array([[1., 1., 1.],
# [0., 1., 1.],
# [1., 0., 1.]])
#
# assert np.allclose(out[0], res_out)
# assert np.allclose(grad[0], res_grad)
@pytest.mark.api_base_fill_diagonal_parameters
def test_fill_diagonal_3():
"""
wrap = True
"""
x = np.zeros((7, 3))
out, grad = fill_diagonal_base(x, 4, warp=True)
res_out = np.array(
[
[4.0, 0.0, 0.0],
[0.0, 4.0, 0.0],
[0.0, 0.0, 4.0],
[0.0, 0.0, 0.0],
[4.0, 0.0, 0.0],
[0.0, 4.0, 0.0],
[0.0, 0.0, 4.0],
]
)
res_grad = np.array(
[
[0.0, 1.0, 1.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
]
)
assert np.allclose(out[0], res_out)
assert np.allclose(grad[0], res_grad)
@pytest.mark.api_base_fill_diagonal_parameters
def test_fill_diagonal_4():
"""
default: Multidimensional
all dimensions of input must be of equal length
"""
x = np.zeros((2, 2, 2))
out, grad = fill_diagonal_base(x, 1)
res_out = np.array([[[1.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 1.0]]])
res_grad = np.array([[[0.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 0.0]]])
assert np.allclose(out[0], res_out)
assert np.allclose(grad[0], res_grad)
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
a1ba025fe318dd09aeeb95e54f2a655205cf044f | 0466559817d3a1be9409da2c83db99c4db3bacfe | /hubcheck/pageobjects/widgets/members_profile_citizenship.py | ec9f66eaa2fd00b4a7f93c8527708680e75ad588 | [
"MIT"
] | permissive | ken2190/hubcheck | 955cf9b75a1ee77e28256dfd3a780cfbc17de961 | 2ff506eb56ba00f035300862f8848e4168452a17 | refs/heads/master | 2023-03-20T15:17:12.949715 | 2015-09-29T16:11:18 | 2015-09-29T16:11:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,551 | py | from hubcheck.pageobjects.basepageelement import Radio
from hubcheck.pageobjects.basepageelement import Select
from hubcheck.pageobjects.widgets.members_profile_element import MembersProfileElement
class MembersProfileCitizenship(MembersProfileElement):
def __init__(self, owner, locatordict={}):
super(MembersProfileCitizenship,self).__init__(owner,locatordict)
# load hub's classes
MembersProfileCitizenship_Locators = self.load_class('MembersProfileCitizenship_Locators')
# update this object's locator
self.locators.update(MembersProfileCitizenship_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.coriginus = Radio(self,{'Yes':'coriginus_yes','No':'coriginus_no'})
self.corigin = Select(self,{'base':'corigin'})
self.access = Select(self,{'base':'access'})
# update the component's locators with this objects overrides
self._updateLocators()
def value(self):
"""return a dictionary with the values of coriginus, corigin, and access"""
return {'coriginus' : self.coriginus.value(),
'corigin' : self.corigin.value(),
'access' : self.access.value()}
def update(self,coriginus=None,corigin=None,access=None):
"""update the values of coriginus, corigin, and access"""
if coriginus != None:
self.coriginus.value = coriginus
if corigin != None:
self.corigin.value = corigin
if access != None:
self.access.value = access
self.save.click()
class MembersProfileCitizenship_Locators_Base(object):
"""locators for MembersProfileCitizenship object"""
locators = {
'base' : "css=.profile-countryorigin",
'coriginus_yes' : "css=#corigin_usyes",
'coriginus_no' : "css=#corigin_usno",
'corigin' : "css=#corigin",
'access' : "css=.profile-countryorigin select[name='access[countryorigin]']",
'sectionkey' : "css=.profile-countryorigin .key",
'sectionvalue' : "css=.profile-countryorigin .value",
'open' : "css=.profile-countryorigin .edit-profile-section",
'close' : "css=.profile-countryorigin .edit-profile-section",
'save' : "css=.profile-countryorigin .section-edit-submit",
'cancel' : "css=.profile-countryorigin .section-edit-cancel",
}
| [
"telldsk@gmail.com"
] | telldsk@gmail.com |
c5547a73de98204b49faad630d70600c0e27b32d | fd3c3ab6482c91e2ac6e497f89ed525eb93a7047 | /tests/test_stimulus.py | 6210872d3c6bd772e8d86955b420021640b855fd | [
"MIT"
] | permissive | alex-vrv/pyABF | 5c4d71f078f4b9bf6e95a1b58d10d9ca6510fed6 | 9ec95539f0130c307e6fa9b6edc980178b9cb6f7 | refs/heads/master | 2023-04-08T13:33:02.571622 | 2021-04-17T17:07:11 | 2021-04-17T17:07:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,427 | py | """
Tests related to locating and reading command waveforms from stimulus waveform
files. If the stimulus waveforms aren't found you can provide a search path
as an argument when instantiating pyabf.ABF()
"""
import sys
import pytest
import os
import numpy as np
import time
import warnings
try:
# this ensures pyABF is imported from this specific path
sys.path.insert(0, "src")
import pyabf
except:
raise ImportError("couldn't import local pyABF")
ABF_PATH = os.path.abspath("data/abfs/H19_29_150_11_21_01_0011.abf")
STIM_FOLDER = os.path.abspath("data/stimulusFiles")
def test_findStimulusFile_NansIfNotFound():
"""When the stimulus file isn't found the waveform should be all NANs."""
warnings.simplefilter("ignore")
abf = pyabf.ABF(ABF_PATH)
stimulus = abf.stimulusByChannel[0]
waveform = stimulus.stimulusWaveform(stimulusSweep=0)
assert isinstance(waveform, np.ndarray)
assert len(waveform) == len(abf.sweepY)
assert np.isnan(waveform).all()
def test_findStimulusFile_foundIfPathGiven():
"""The user can tell pyABF where to look for stimulus files."""
abf = pyabf.ABF(ABF_PATH, stimulusFileFolder=STIM_FOLDER)
stimulus = abf.stimulusByChannel[0]
waveform = stimulus.stimulusWaveform(stimulusSweep=0)
assert isinstance(waveform, np.ndarray)
assert not np.isnan(waveform).any()
assert pytest.approx(waveform[100000], 76.261)
def cachedStimulusSpeedBoost(useCaching):
"""Open an ABF/stimulus twice and return the times (in sec)"""
times = [None, None]
useCaching = [False, useCaching]
for i in range(2):
t1 = time.perf_counter()
abf = pyabf.ABF(
ABF_PATH,
stimulusFileFolder=STIM_FOLDER,
cacheStimulusFiles=useCaching[i]
)
stimulus = abf.stimulusByChannel[0]
waveform = stimulus.stimulusWaveform(stimulusSweep=0)
assert pytest.approx(waveform[100000], 76.261)
times[i] = time.perf_counter() - t1
speedBoost = times[0]/times[1]
print(f"Caching: {useCaching[1]}, speed boost: {speedBoost}x")
return speedBoost
def test_stimulus_caching():
# first try without caching
assert (cachedStimulusSpeedBoost(False) < 2)
# now use caching for a >10x speed boost
assert (cachedStimulusSpeedBoost(True) > 10)
# confirm not using caching is still slow
assert (cachedStimulusSpeedBoost(False) < 2)
| [
"swharden@gmail.com"
] | swharden@gmail.com |
be1bb4ed6662e9ff830d39528774f26d5040e745 | 94f304cb4c2ac2ad6ff1ee39725f46254c8838bc | /core/draw/Ui_draw_point.py | 7ec948d966ec3c42e306b90ab1b088857653a980 | [] | no_license | kmolLin/python3_solve_dynamic | 105bd70edaa5014e0ad76a9a3c66e43dc0fa5ad7 | 18f56e6958dd1816dfb7c26f4857aa3b41de9312 | refs/heads/master | 2021-06-03T10:19:44.551240 | 2016-09-23T13:22:52 | 2016-09-23T13:22:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,397 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/ahshoe/Desktop/Pyslvs/core/draw/draw_point.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(377, 219)
Dialog.setMinimumSize(QtCore.QSize(377, 219))
Dialog.setMaximumSize(QtCore.QSize(377, 219))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/point.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
Dialog.setSizeGripEnabled(False)
Dialog.setModal(True)
self.horizontalLayout = QtWidgets.QHBoxLayout(Dialog)
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.label_3 = QtWidgets.QLabel(Dialog)
self.label_3.setTextFormat(QtCore.Qt.RichText)
self.label_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_3.setWordWrap(True)
self.label_3.setObjectName("label_3")
self.verticalLayout.addWidget(self.label_3)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.label_4 = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
self.label_4.setSizePolicy(sizePolicy)
self.label_4.setTextFormat(QtCore.Qt.RichText)
self.label_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_4.setObjectName("label_4")
self.verticalLayout.addWidget(self.label_4)
self.Point_num = QtWidgets.QTextBrowser(Dialog)
self.Point_num.setMaximumSize(QtCore.QSize(16777215, 30))
self.Point_num.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.Point_num.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.Point_num.setObjectName("Point_num")
self.verticalLayout.addWidget(self.Point_num)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.label_2 = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 0, 1, 1, 1)
self.X_coordinate = QtWidgets.QLineEdit(Dialog)
self.X_coordinate.setInputMethodHints(QtCore.Qt.ImhLowercaseOnly)
self.X_coordinate.setText("")
self.X_coordinate.setObjectName("X_coordinate")
self.gridLayout.addWidget(self.X_coordinate, 1, 0, 1, 1)
self.Y_coordinate = QtWidgets.QLineEdit(Dialog)
self.Y_coordinate.setText("")
self.Y_coordinate.setEchoMode(QtWidgets.QLineEdit.Normal)
self.Y_coordinate.setClearButtonEnabled(False)
self.Y_coordinate.setObjectName("Y_coordinate")
self.gridLayout.addWidget(self.Y_coordinate, 1, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.horizontalLayout.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Vertical)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout_2.addWidget(self.buttonBox)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem1)
self.Fix_Point = QtWidgets.QCheckBox(Dialog)
self.Fix_Point.setObjectName("Fix_Point")
self.verticalLayout_2.addWidget(self.Fix_Point)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "New Point"))
self.label_3.setText(_translate("Dialog", "<html><head/><body><p><span style=\" font-size:12pt;\">Setting Coordinates for the New Point.</span></p></body></html>"))
self.label_4.setText(_translate("Dialog", "<html><head/><body><p>Point Number</p></body></html>"))
self.Point_num.setWhatsThis(_translate("Dialog", "Name for next point."))
self.label.setText(_translate("Dialog", "x coordinate"))
self.label_2.setText(_translate("Dialog", "y coordinate"))
self.X_coordinate.setPlaceholderText(_translate("Dialog", "0.0"))
self.Y_coordinate.setPlaceholderText(_translate("Dialog", "0.0"))
self.Fix_Point.setText(_translate("Dialog", "&Fixed"))
import icons_rc
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| [
"smpss91341@gmail.com"
] | smpss91341@gmail.com |
3e3528b72cecdd0356d6df40b19ea4c3497bb400 | 914ca4921c114c917267214e0987ebecf30b3510 | /Programming_Practice/Python/Base/Bigdata_day1010/LIST08.py | aab3742c91537c9b07e87bb9007499fe748b1f30 | [] | no_license | BurnFaithful/KW | 52535030ea57f1489a0d108d599b66ffee50a1f4 | 15deb50449b8f902f623f20b97448c0f473a9342 | refs/heads/master | 2022-12-20T16:06:01.827398 | 2020-09-12T08:51:23 | 2020-09-12T08:51:23 | 294,897,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | # 리스트 조작 함수 그 외
mylist = [30, 10, 20]
print("현재 리스트 :", mylist)
mylist.append(40)
print("append(40) 후 :", mylist)
res = mylist.pop()
print("pop()으로 추출한 값 :", res)
print("현재 리스트 :", mylist)
mylist.sort()
print("sort() 후 :", mylist)
mylist.reverse()
print("reverse() 후 :", mylist)
val = mylist.index(20)
print("index(20) :", val)
mylist.insert(2, 222)
print("insert(2, 222) 후 :", mylist)
mylist.remove(222)
print("remove(222) 후 :", mylist)
mylist.extend([77, 88, 99])
print("extend([77, 88, 99]) 후 :", mylist)
cnt = mylist.count(77)
print("현재 리스트의 77 개수 :", cnt)
ary = [1, 3, 5, 6, 7, 4, 10, 66, 9, 99]
temp = sorted(ary)
print(ary)
print(temp) | [
"burnfaithful@gmail.com"
] | burnfaithful@gmail.com |
45098d49bfd0204d1df0e2b2fdf4155f7bc7261a | b3c47795e8b6d95ae5521dcbbb920ab71851a92f | /Leetcode/Algorithm/python/1000/00944-Delete Columns to Make Sorted.py | 01c17945b8dcf7591fc445ab750d72a8edfe98a0 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Wizmann/ACM-ICPC | 6afecd0fd09918c53a2a84c4d22c244de0065710 | 7c30454c49485a794dcc4d1c09daf2f755f9ecc1 | refs/heads/master | 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | class Solution(object):
def minDeletionSize(self, A):
n = len(A[0])
st = []
for i in xrange(n):
st.append(''.join(map(lambda x: x[i], A)))
res = 0
for s in st:
if s != ''.join(sorted(s)):
res += 1
return res
| [
"noreply@github.com"
] | Wizmann.noreply@github.com |
ccc0db47bb35b02bf55b2a3b7b26bd606bcd75a7 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /log-20190927/132.230.102.123-10.21.11.20/1569575874.py | 5de66d09d31823420aabdb67b028801eeaf683e5 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,761 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
def divisior(n: int) -> list:
"""Eine Funktion, die alle Dividenten einer positiven,
ganzen Zahl in einer Liste wiedergibt
"""
j = [n]
for d in range(n+1): #loop bis n
d > 0
if abs(n) % int(d) == 0:
j.append(str(d))
return j
else:
return j
######################################################################
## hidden code
def mk_coverage():
covered = set()
target = set(range(6))
count = 0
def coverage(func):
nonlocal covered, target, count
def wrapper(n):
nonlocal covered, count
if n <= 0:
covered.add(0)
if n == 1:
covered.add(1)
r = func (n)
lenr = len (r)
if lenr == 1:
covered.add(2)
if lenr == 2:
covered.add(3)
if (lenr > 2) and ( lenr % 2 == 0):
covered.add(4)
if lenr > 2 and lenr % 2 == 1:
covered.add(5)
count += 1
return r
if func == "achieved": return len(covered)
if func == "required": return len(target)
if func == "count" : return count
if func.__doc__:
wrapper.__doc__ = func.__doc__
wrapper.__hints__ = typing.get_type_hints (func)
return wrapper
return coverage
coverage = mk_coverage()
try:
divisors = coverage(divisors)
except:
pass
## Lösung Teil 2. (Tests)
def test_divisior():
assert divisior(6) == ["1","2","3","6"]
assert divisior(3) == ["3"]
assert divisior(-3) == ["3"]
######################################################################
## hidden tests
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
def test_divisors (self):
assert divisors
assert 'n' in getfullargspec(divisors).args
class TestGrades:
def test_docstring_present(self):
assert divisors.__doc__ is not None
def test_typing_present(self):
assert divisors.__hints__ == typing.get_type_hints(self.divisors_oracle)
def test_coverage(self):
assert coverage("achieved") == coverage("required")
def divisors_oracle(self, n:int)->list:
return [ d for d in range (1, n + 1) if n % d == 0 ]
def check_divisors (self, x):
assert set(divisors (x)) == set(self.divisors_oracle (x))
def test_correctness(self):
for i in range (100):
self.check_divisors (i)
n = random.randrange (10000)
self.check_divisors (n)
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
3a04f547f002847a7ded45264a9b924b04ad80c2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_95/2604.py | 17221a93a4af173a541d76a67dca1f017c864d35 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | f = open('A-small-attempt5.in')
lines = f.readline()
inputlist = f.readlines()
linewords =[]
TranslatedWord = ''
TranslatedWords = []
TranslatedSentence = ''
outputlist=[]
tongues = {}
tongues['a']='y'
tongues['b']='h'
tongues['c']='e'
tongues['d']='s'
tongues['e']='o'
tongues['f']='c'
tongues['g']='v'
tongues['h']='x'
tongues['i']='d'
tongues['j']='u'
tongues['k']='i'
tongues['l']='g'
tongues['m']='l'
tongues['n']='b'
tongues['o']='k'
tongues['p']='r'
tongues['q']='z'
tongues['r']='t'
tongues['s']='n'
tongues['t']='w'
tongues['u']='j'
tongues['v']='p'
tongues['w']='f'
tongues['x']='m'
tongues['y']='a'
tongues['z']='q'
for i in inputlist:
linewords = i.split( )
#print linewords
for j in linewords:
for letters in j:
TranslatedWord = TranslatedWord + tongues[letters]
TranslatedWords.append(TranslatedWord)
TranslatedWord = ''
#print TranslatedWords
for word in TranslatedWords:
TranslatedSentence = TranslatedSentence + ' ' + word
x = len(outputlist)
outputlist.append('Case #' + str(x+1) + ':' + TranslatedSentence + '\n')
#print TranslatedSentence
TranslatedSentence = ''
TranslatedWords=[]
#Now lets print results
results = open('output.txt', 'w')
results.writelines(outputlist)
for outputSentence in outputlist:
print outputSentence
results.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
05f1f72fb9d2533b3aba6ac889a694e0e1edc5f1 | 3088dc21f3e5eeb31575704712a695d71772495f | /torch/_C/_lazy.pyi | e86b80837d5898fd477b7f63c5e7ad387a2a65de | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | ezyang/pytorch | 8e82444c78025ff12fa605a62a18acfc43b176b8 | 6fc64500d1af9ee1306c7695ab0a2ff01852ff00 | refs/heads/master | 2023-09-02T01:56:28.108400 | 2022-06-07T19:13:21 | 2022-06-07T19:13:21 | 101,798,885 | 3 | 0 | NOASSERTION | 2022-08-23T22:10:07 | 2017-08-29T19:28:39 | C++ | UTF-8 | Python | false | false | 878 | pyi | from typing import List
from torch import Tensor
#defined in torch/csrc/lazy/python/init.cpp
def _mark_step(device: str, devices: List[str], wait: bool): ...
def _wait_device_ops(devices: List[str]): ...
def _reset_metrics(): ...
def _counter_names() -> List[str]: ...
def _counter_value(name: str) -> int: ...
def _get_graph_hash(tensors: List[Tensor]) -> str: ...
def _sync_multi(tensors: List[Tensor], devices: List[str], wait: bool = True, sync_ltc_data: bool = True): ...
def _get_tensor_id(tensor: Tensor) -> int: ...
def _get_tensors_text(tensors: List[Tensor]) -> str: ...
def _get_tensors_dot(tensors: List[Tensor]) -> str: ...
def _get_tensors_backend(tensors: List[Tensor]) -> str: ...
def _get_force_fallback() -> str: ...
def _set_force_fallback(newval: str): ...
def _clear_ir_cache(): ...
def _dump_ir_cache(filename: str): ...
def _set_reuse_ir(val: bool): ...
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
eddda169430ed0378114c2ea9afbf5730a434155 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02984/s103576149.py | 4b0ac84178304706c240d4938f971696c04a987b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | n=int(input())
a=list(map(int,input().split()))
s=sum(a)
ans=[]
s1=0
for i in range(n):
if i%2==1:
s1+=a[i]
ans.append(str(s-s1*2))
for j in range(n-1):
ans.append(str(a[j]*2-int(ans[-1])))
print(' '.join(ans)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b973456b943c3feb54008e7162ec38a69daa125a | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2008-EOL/programming/languages/perl/perl-Clone/actions.py | a535f852d7ba91e57c9e618fdef343f9b88e8c49 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2007,2008 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import perlmodules
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "%s-%s" % (get.srcNAME()[5:], get.srcVERSION())
def setup():
perlmodules.configure()
def build():
perlmodules.make()
def check():
perlmodules.make("test")
def install():
perlmodules.install()
pisitools.dodoc("Changes", "MANIFEST")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
1bb3a9f8dfa507a3d76443d3a52f62ed9e64ba3a | eac7ae395c4832ac394087054ab014d1d6a9f6a6 | /python_experiments/data_analysis/figures_icde19/parse_others/parse_hidx.py | b9b210f1b1b13d1f29e2f0c48b1ac00e6f5122c1 | [
"MIT"
] | permissive | mexuaz/AccTrussDecomposition | 21be22007e1c50ca4b7df6fbbad1dfbf4c2fffae | 15a9e8fd2f123f5acace5f3b40b94f1a74eb17d4 | refs/heads/master | 2022-12-14T03:41:05.133564 | 2020-09-03T00:35:33 | 2020-09-03T00:35:33 | 291,565,779 | 0 | 0 | MIT | 2020-08-30T22:57:55 | 2020-08-30T22:57:55 | null | UTF-8 | Python | false | false | 1,989 | py | from data_analysis.util.read_file_utils_updated import *
from config import *
from exec_utilities import exec_utils
import json
others_time_hidx = 'h-idx'
def fetch_statistics(root_dir, dataset_lst, t_num, algorithm, json_file_path):
# Dataset -> Detailed Time Info
my_dict = dict()
for dataset in dataset_lst:
file_path = os.sep.join([root_dir, dataset, t_num, algorithm + '.log'])
logger.info(file_path)
lines = get_file_lines(file_path)
time_iter = list(filter(lambda l: 'Total time' in l, lines)) if lines is not None else None
if time_iter is None or len(list(time_iter)) == 0:
my_dict[dataset] = 0
else:
tmp = time_iter[0]
print(tmp)
my_dict[dataset] = eval(tmp.split(':')[-1].replace('secs', ''))
with open(json_file_path, 'w') as ofs:
ofs.write(json.dumps(my_dict, indent=4))
if __name__ == '__main__':
base_dir = '/home/yche/'
os.system('mkdir -p {}logs/'.format(base_dir))
my_res_log_file_folder = 'exp-2019-10-07-hidx'
log_path = my_res_log_file_folder + '.log'
logger = exec_utils.get_logger('{}logs/'.format(base_dir) + log_path, __name__)
hostname = 'ustgpu2'
root_dir = '{}mnt/ustgpu2/workspace/yche/git-repos/' \
'OutOfCoreSCAN/python_experiments/exp_results/{}/ustgpu2'.format(base_dir, my_res_log_file_folder)
os.system('mkdir -p {}'.format(others_time_hidx))
for my_md_algorithm_name in ['pnd-2300', 'hidx-org-2300']:
json_file_path = './{}/{}.json'.format(others_time_hidx, my_md_algorithm_name)
# Dataset -> Detailed Time Info
dataset_lst = [
'snap_orkut', 'webgraph_uk', 'webgraph_webbase',
'webgraph_eu', 'webgraph_it', 'webgraph_twitter']
reorder_tag = 'org'
fetch_statistics(root_dir=root_dir, dataset_lst=dataset_lst, t_num='40',
algorithm=my_md_algorithm_name, json_file_path=json_file_path)
| [
"yche@cse.ust.hk"
] | yche@cse.ust.hk |
3ac4f8b2a990c75001a1b676c0dcdbdcfaa0819c | 59b0ebc4249f20edd0e87dc63784c6e8c138c7fd | /.history/roman_20180615012149.py | 1f4d4bae42ab05f4189eed06aa87328cc9a2eeb8 | [] | no_license | Los4U/first_python_programs | f397da10be3ef525995f3f220e3b60012a6accaa | c3fc33a38c84abd292cb2e86de63e09434fc7fc4 | refs/heads/master | 2020-03-22T08:09:40.426118 | 2018-07-04T17:17:58 | 2018-07-04T17:17:58 | 139,748,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | number = 568
print("1000", number//1000)
print("900", number//900)
print("500", number//500)
print("400", number//400)
print("100", number//100)
print("90", number//90)
print("50", number//50)
print("40", number//40)
print("10", number//10)
print("9", number//9)
print("5", number//5)
print("4", number//4)
print("1", number//1)
change = [[1000, 'M'], [900, 'CM'], [500, 'D'], [400, 'CD'],
[ 100, 'C'], [ 90, 'XC'], [ 50, 'L'], [ 40, 'XL'],
[ 10, 'X'], [ 9, 'IX'], [ 5, 'V'], [ 4, 'IV'],
[ 1, 'I']]
result = ''
for denom, roman_digit in conv:
result += roman_digit*(number//denom)
number %= denom
print(number , " - ", result)
| [
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
12623598889120e6503dc91e9996fe6e19049188 | eb9e5f950f567458deb7ac6a958e9e07eec8211c | /Python/Projects/mysite/im/views.py | d6a2010f3c6fe4d3720effe6c19e7f063374e179 | [] | no_license | hyteer/ytest | b32402f4a85af2cba298729b81ae73ccedbe6013 | 98234f88e923a705ce08673a269904ca81117f03 | refs/heads/master | 2020-01-23T21:47:40.100472 | 2017-01-23T10:12:21 | 2017-01-23T10:12:21 | 74,676,200 | 0 | 0 | null | 2017-01-23T10:12:22 | 2016-11-24T13:34:34 | JavaScript | UTF-8 | Python | false | false | 1,691 | py | # encoding: utf-8
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.views import generic
from .forms import RoomLabelForm
from .models import Message, Room
# Create your views here.
def index(req):
#return HttpResponse("hi...")
if req.method == 'POST':
form = RoomLabelForm(req.POST)
if form.is_valid():
room_label = form.cleaned_data['room_label']
return HttpResponseRedirect('/im/room/%s' % room_label)
else:
form = RoomLabelForm()
return render(req, 'im/index.html', {"form": form})
def room(request, label):
# If the room with the given label doesn't exist, automatically create it
# upon first visit (a la etherpad).
room, created = Room.objects.get_or_create(label=label)
# We want to show the last 50 messages, ordered most-recent-last
messages = reversed(room.messages.order_by('-time')[:50])
return render(request, "im/room.html", {
'room': room,
'messages': messages,
})
class RoomList(generic.ListView):
template_name = 'im/roomlist.html'
context_object_name = 'room_list'
def get_queryset(self):
"""Return the last five published questions."""
return Room.objects.all()[:5]
'''
def room_new(request, label):
# If the room with the given label doesn't exist, automatically create it
# upon first visit (a la etherpad).
room, created = Room.objects.get_or_create(label=label)
# We want to show the last 50 messages, ordered most-recent-last
messages = reversed(room.messages.order_by('-timestamp')[:50])
return render(request, "realtime/room_new.html", {
'room': room,
'messages': messages,
})
''' | [
"hyteer@qq.com"
] | hyteer@qq.com |
f8a8e65317639d4af42307e7fb7372570acb74ba | 9cb4b0753f1392b488547395c43a8a6df5789a7a | /test/test_print_formats.py | c83cd4697133d482824149cf907251f9e8e95df5 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ZryletTC/archapp | db20cbcbf8135cc364d7dc0dda00a4409ff05800 | 68299fa3e35c292cff33bba55a3a75e9ae568815 | refs/heads/master | 2021-07-23T00:16:10.902137 | 2018-09-17T18:05:33 | 2018-09-17T18:05:33 | 233,143,295 | 0 | 0 | NOASSERTION | 2020-01-10T23:30:32 | 2020-01-10T22:50:21 | null | UTF-8 | Python | false | false | 319 | py | import unittest
from archapp.util import print_formats
class PrintFormatsTestCase(unittest.TestCase):
def test_print_list_no_crash(self):
print_formats.list_print([], do_print=False)
print_formats.list_print(["text"], do_print=False)
print_formats.list_print(["text"] * 50, do_print=False)
| [
"zlentz@slac.stanford.edu"
] | zlentz@slac.stanford.edu |
909fdf9e128bce0236a62e8ff7811d35593840e1 | 97a4d29863d1ce96f366554fdd985c3ce580bb5d | /038.py | 0992e125a2c74680dc26c628fe36a109f19fe972 | [] | no_license | Everfighting/Python-Algorithms | 5c3a102fed3a29858f3112d657c69e077efc7e28 | 235e9b4c66602035be39a8d3b3ad9cf016aebbb9 | refs/heads/master | 2021-01-20T22:19:18.902687 | 2018-03-02T05:38:27 | 2018-03-02T05:38:27 | 61,302,323 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
if __name__ == '__main__':
a = []
sum = 0.0
for i in range(3):
for j in range(3):
a.append(float(raw_input("input num:\n")))
for i in range(3):
sum += a[3*i+i]
#比原来的解法更加容易理解!
print sum
| [
"cbb903601682@163.com"
] | cbb903601682@163.com |
2f05dd68471f8c482bab11750f30469d447bc5fd | 5de646fb3ecf10ecb45e05018a23b6345fb9ca53 | /kickstart/2020 Round A/workout.py | 3228eaf2dddec7155a79d9a7818a00d038790e31 | [] | no_license | PPinto22/LeetCode | 5590d6ca87efcd29f9acd2eaed1bcf6805135e29 | 494a35542b61357c98c621202274d774e650a27c | refs/heads/master | 2022-04-29T20:37:31.085120 | 2022-04-02T12:02:30 | 2022-04-02T12:02:30 | 201,478,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | import math
def solve(N, K, sessions):
diffs = [sessions[i + 1] - sessions[i] for i in range(N - 1)]
return min_binary_search(1, max(diffs), constraints, K, diffs)
# Validate if it is possible to achieve the given difficulty
# with less than 'max_splits' splits
def constraints(difficulty, max_splits, diffs):
splits = sum(get_splits(diffs, difficulty))
return splits <= max_splits
# Find the minimum value between lower and upper
# that meets the given constraints
def min_binary_search(lower, upper, constraints, *args):
while lower <= upper:
middle = (lower + upper) // 2
if constraints(middle, *args):
upper = middle - 1
else:
lower = middle + 1
return lower
# Lists how many additional sessions must be added between
# each session get a the difficulty of 'target' (at most)
def get_splits(diffs, target):
return [math.ceil(diff / target) - 1 for diff in diffs]
if __name__ == '__main__':
T = int(input())
for Ti in range(1, T + 1):
# N: Sessions; K: Additional sessions
N, K = map(int, input().split())
sessions = list(map(int, input().split()))
result = solve(N, K, sessions)
print('Case #{}: {}'.format(Ti, result), flush=True)
| [
"pedropinto24@hotmail.com"
] | pedropinto24@hotmail.com |
a7fe54623cf7e91b74619370ecdf197fd332ce39 | 83727bce4680f56234b7ca35ab3fe99cd0cb0d3e | /lib/nark/__init__.py | c0748f97d6f7c712e456b91d78fa6e0469f45dfb | [] | no_license | onethousandfaces/rpg | 62647b2dd6ad0c253ed363f3bcd340706f075082 | 219b3f865c99d5619ec0a63f5e18ac1f0f064413 | refs/heads/master | 2021-01-15T16:16:11.037991 | 2013-04-07T07:41:20 | 2013-04-07T07:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from Enum import *
import log
import ioc
from Log import Log
from Register import Register
__all__ = [ enum, bitflags, log, Log, Register, ioc ]
| [
"linderd@iinet.net.au"
] | linderd@iinet.net.au |
93aa7788b41ca27080912ddcf5f42c76086fa66a | f7ae3a193cf672f2c7edf27518f6d3871f635bce | /tools/gen_daily_areas_scotland.py | dd1391d07c85027df83bc161ea627e10c7746a1b | [] | no_license | lozenge0/covid-19-uk-data | 262c158f27734dc0d8b0e3d28c21be613465eebe | e9f2c927e0be2e4301921d423108160e4a272ade | refs/heads/master | 2021-03-26T01:50:55.964597 | 2020-03-15T21:58:24 | 2020-03-15T21:58:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | #!/usr/bin/env python
# Extract local authority case data (England) or health board data (Scotland) from an HTMLpage and save in CSV format.
from bs4 import BeautifulSoup
import csv
import pandas as pd
import re
import sys
html_file = sys.argv[1]
csv_file = sys.argv[2]
# Get upper tier local authority name to code mapping.
# Note that this does not include Scotland, but that's OK as Scotland areas are health boards, not local authorities.
la_mapping = pd.read_csv(
"data/raw/Lower_Tier_Local_Authority_to_Upper_Tier_Local_Authority_April_2019_Lookup_in_England_and_Wales.csv"
)
la_name_to_code = dict(zip(la_mapping["UTLA19NM"], la_mapping["UTLA19CD"]))
la_name_to_code["Cornwall and Isles of Scilly"] = la_name_to_code["Cornwall"]
la_name_to_code["Hackney and City of London"] = la_name_to_code["Hackney"]
m = re.match(".+-(.+)-(\d{4}-\d{2}-\d{2})\.html", html_file)
country = m.group(1).title()
date = m.group(2)
html = open(html_file).read()
soup = BeautifulSoup(html, features="html.parser")
table = soup.find_all("table")[-1]
output_rows = [["Date", "Country", "AreaCode", "Area", "TotalCases"]]
for table_row in table.findAll("tr"):
columns = table_row.findAll("td")
if len(columns) == 0:
continue
output_row = [date, country, la_name_to_code.get(columns[0].text, "")]
for column in columns:
output_row.append(column.text)
output_rows.append(output_row)
with open(csv_file, "w") as csvfile:
writer = csv.writer(csvfile)
writer.writerows(output_rows)
| [
"tom.e.white@gmail.com"
] | tom.e.white@gmail.com |
ddea386e7d4b21095806b4773a9d65d07e26e84f | d5d9996c55414561fe77a2630ad7e0cfff0735ad | /pddm/statstics_anlysis/bootstrapped/__init__.py | f83d651288bd9cb793be2ee6023a791a593a03fa | [
"Apache-2.0"
] | permissive | kyo-kutsuzawa/EEI_Analysis_model_based_rl | 4b704875619be0045b6f3b1ad12e86bd1041347c | d83ad7a7da936672a05ccacc6846d16c33421b96 | refs/heads/main | 2023-01-11T22:36:49.348156 | 2020-11-10T04:54:21 | 2020-11-10T04:54:21 | 313,227,869 | 0 | 0 | Apache-2.0 | 2020-11-16T07:55:31 | 2020-11-16T07:55:30 | null | UTF-8 | Python | false | false | 295 | py | # Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
| [
"a.hamada@neuro.mech.tohoku.ac.jp"
] | a.hamada@neuro.mech.tohoku.ac.jp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.