blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
507e82f5adc9314085b0165139719ae82759ed26 | 33e2187c1815b1e1209743f5a4870401d2097d71 | /CTCI/Linked Lists/q2.3.py | 8437767ca78a47b5f0d14ead1c26ca2024f034c0 | [] | no_license | sachinjose/Coding-Prep | 8801e969a3608b5e69dc667cba7f3afaf7273e88 | 95f6bc85e7c38034e358af47ef4c228937cd4629 | refs/heads/master | 2022-12-26T22:49:48.510197 | 2020-09-22T07:05:55 | 2020-09-22T07:05:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | class Node:
    def __init__(self, item):
        """Create a singly linked list node holding *item*."""
        self.value = item  # payload stored in this node
        self.next = None  # successor node; None marks the tail
def del_mid_node(node):
    """Unlink *node* from its list given only a reference to *node* itself.

    Classic trick for deleting a non-tail node without the head pointer:
    copy the successor's payload into *node*, then drop the successor.
    Assumes ``node.next`` exists (i.e. *node* is not the tail).
    """
    successor = node.next
    # Evaluate all three right-hand values first, then assign at once.
    node.value, node.next, successor.next = successor.value, successor.next, None
def print_ll(node):
    """Print every value of the list starting at *node*, one per line.

    Returns 1 (kept for compatibility with existing callers).
    """
    i = node
    # Walk until we step past the tail. The previous condition
    # (`while i.next != None`) stopped one node early and never printed
    # the last value; it also crashed on an empty list (node is None).
    while i is not None:
        print(i.value)
        i = i.next
    return 1
# --- demo driver: build 1 -> 2 -> 3 -> 4 -> 5, delete the middle node ---
a = Node(1)
b = Node(2)
c = Node(3)
d = Node(4)
e = Node(5)
a.next = b
b.next = c
c.next = d
d.next = e
print_ll(a)
print()
# Delete node c (value 3) in place, then show the list again.
del_mid_node(c)
print()
print_ll(a)
| [
"sachinjose16@gmail.com"
] | sachinjose16@gmail.com |
a2dd768f8a186fca693fd3dd4d8504d4e289e3bd | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/change_enterprise_realname_authentication_request.py | ffce54df52b790c9f5b5cf55ebbdeb800872ff27 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ChangeEnterpriseRealnameAuthenticationRequest:
    """Generated request model for the enterprise real-name authentication change API.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attributes listed here are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'body': 'ChangeEnterpriseRealnameAuthsReq'
    }

    attribute_map = {
        'body': 'body'
    }

    def __init__(self, body=None):
        """ChangeEnterpriseRealnameAuthenticationRequest - a model defined in huaweicloud sdk"""
        self._body = None
        self.discriminator = None  # unused here; kept for generated-model parity

        if body is not None:
            self.body = body

    @property
    def body(self):
        """Gets the body of this ChangeEnterpriseRealnameAuthenticationRequest.

        :return: The body of this ChangeEnterpriseRealnameAuthenticationRequest.
        :rtype: ChangeEnterpriseRealnameAuthsReq
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this ChangeEnterpriseRealnameAuthenticationRequest.

        :param body: The body of this ChangeEnterpriseRealnameAuthenticationRequest.
        :type: ChangeEnterpriseRealnameAuthsReq
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursively serialising any nested
        # model objects (anything exposing its own to_dict()).
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Python 2 only: force a utf-8 default encoding before dumping.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ChangeEnterpriseRealnameAuthenticationRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
d0026f19a145876761727a4f6448e85456207581 | 4d5aa9cafa363de94fa87211503f4416d8c3904e | /dbaas/physical/admin/replication_topology.py | e1e93b7c50b541953eb5cc34a14ba25c719bd062 | [] | permissive | jaeko44/python_dbaas | 0c77da58c4e72719126d69535ac7a16e9ef27d34 | 4fafa4ad70200fec1436c326c751761922ec9fa8 | refs/heads/master | 2020-12-03T00:18:15.535812 | 2017-04-20T21:16:50 | 2017-04-20T21:16:50 | 96,011,945 | 0 | 0 | BSD-3-Clause | 2020-04-04T05:16:53 | 2017-07-02T08:46:17 | Python | UTF-8 | Python | false | false | 457 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
class ReplicationTopologyAdmin(admin.ModelAdmin):
    """Django-admin configuration for the ReplicationTopology model."""
    list_filter = ("has_horizontal_scalability", "engine")
    search_fields = ("name",)
    list_display = ("name", "versions", "has_horizontal_scalability")
    save_on_top = True  # show the save buttons at the top of the change form

    def versions(self, obj):
        """Comma-separated engine versions, rendered as a list column."""
        return ", ".join([str(engine.version) for engine in obj.engine.all()])
| [
"mauro_murari@hotmail.com"
] | mauro_murari@hotmail.com |
b523666030e030ea978ef346ea49101a187f219c | 7c85bf860949ee2c9245530a0c2b40de5b2181f9 | /albert_lstm_crf/albert/lcqmc_progressor.py | 7af945b348e3b4de4f91545fb4e80ed146465983 | [] | no_license | wjunneng/2019-FlyAI-Chinese-Named-Entity-Recognition | 6bc081e5d8cc8828af48a3d104240c86a0dcc03c | 308aa38673b8d1fc1a7c70f9d2b6599a29abcf4d | refs/heads/master | 2022-04-05T03:14:22.749509 | 2020-01-15T08:29:52 | 2020-01-15T08:29:52 | 226,505,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,996 | py | import torch
import csv
from callback.progressbar import ProgressBar
from model.tokenization_bert import BertTokenizer
from common.tools import logger
from torch.utils.data import TensorDataset
class InputExample(object):
    """A single (possibly paired) text example with an optional label."""
    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs an InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
                sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
                Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
                specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
class InputFeature(object):
    """
    A single set of features of data.
    """

    def __init__(self, input_ids, input_mask, segment_ids, label_id, input_len):
        self.input_ids = input_ids  # token ids, zero-padded to max_seq_len
        self.input_mask = input_mask  # 1 for real tokens, 0 for padding
        self.segment_ids = segment_ids  # 0 for sentence A, 1 for sentence B
        self.label_id = label_id  # integer class label
        self.input_len = input_len  # number of non-padding tokens
class BertProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def __init__(self, vocab_path, do_lower_case):
        # WordPiece tokenizer built from the given vocabulary file.
        self.tokenizer = BertTokenizer(vocab_path, do_lower_case)

    def get_train(self, data_file):
        """Gets a collection of `InputExample`s for the train set."""
        return self.read_data(data_file)

    def get_dev(self, data_file):
        """Gets a collection of `InputExample`s for the dev set."""
        return self.read_data(data_file)

    def get_test(self, lines):
        # Test data is assumed to be pre-read; returned unchanged.
        return lines

    def get_labels(self):
        """Gets the list of labels for this data set."""
        return ["0", "1"]

    @classmethod
    def read_data(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, "r", encoding="utf-8-sig") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                lines.append(line)
            return lines

    def truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncate *tokens_a*/*tokens_b* in place until their total length fits."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()

    def create_examples(self, lines, example_type, cached_examples_file):
        """
        Creates examples for data.

        Each input line is expected as (text_a, text_b, label); results are
        cached with torch.save so subsequent runs load from disk.
        """
        pbar = ProgressBar(n_total=len(lines), desc='create examples')
        if cached_examples_file.exists():
            logger.info("Loading examples from cached file %s", cached_examples_file)
            examples = torch.load(cached_examples_file)
        else:
            examples = []
            for i, line in enumerate(lines):
                guid = '%s-%d' % (example_type, i)
                text_a = line[0]
                text_b = line[1]
                label = line[2]
                label = int(label)
                example = InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)
                examples.append(example)
                pbar(step=i)
            logger.info("Saving examples into cached file %s", cached_examples_file)
            torch.save(examples, cached_examples_file)
        return examples

    def create_features(self, examples, max_seq_len, cached_features_file):
        """
        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0  0    0    0  0     0       0   0    1  1  1  1   1   1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0   0   0   0  0     0   0
        """
        pbar = ProgressBar(n_total=len(examples), desc='create features')
        if cached_features_file.exists():
            logger.info("Loading features from cached file %s", cached_features_file)
            features = torch.load(cached_features_file)
        else:
            features = []
            for ex_id, example in enumerate(examples):
                tokens_a = self.tokenizer.tokenize(example.text_a)
                tokens_b = None
                label_id = example.label

                if example.text_b:
                    tokens_b = self.tokenizer.tokenize(example.text_b)
                    # Modifies `tokens_a` and `tokens_b` in place so that the total
                    # length is less than the specified length.
                    # Account for [CLS], [SEP], [SEP] with "- 3"
                    self.truncate_seq_pair(tokens_a, tokens_b, max_length=max_seq_len - 3)
                else:
                    # Account for [CLS] and [SEP] with '-2'
                    if len(tokens_a) > max_seq_len - 2:
                        tokens_a = tokens_a[:max_seq_len - 2]

                tokens = ['[CLS]'] + tokens_a + ['[SEP]']
                segment_ids = [0] * len(tokens)
                if tokens_b:
                    tokens += tokens_b + ['[SEP]']
                    segment_ids += [1] * (len(tokens_b) + 1)

                input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
                input_mask = [1] * len(input_ids)
                # Zero-pad ids / mask / segments up to max_seq_len.
                padding = [0] * (max_seq_len - len(input_ids))
                input_len = len(input_ids)

                input_ids += padding
                input_mask += padding
                segment_ids += padding

                assert len(input_ids) == max_seq_len
                assert len(input_mask) == max_seq_len
                assert len(segment_ids) == max_seq_len

                # Log the first two examples for a quick sanity check.
                if ex_id < 2:
                    logger.info("*** Example ***")
                    # NOTE(review): the trailing `% ()` below is a no-op left over
                    # from %-formatting; the f-string already did the work.
                    logger.info(f"guid: {example.guid}" % ())
                    logger.info(f"tokens: {' '.join([str(x) for x in tokens])}")
                    logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}")
                    logger.info(f"input_mask: {' '.join([str(x) for x in input_mask])}")
                    logger.info(f"segment_ids: {' '.join([str(x) for x in segment_ids])}")
                    logger.info(f"label id : {label_id}")

                feature = InputFeature(input_ids=input_ids,
                                       input_mask=input_mask,
                                       segment_ids=segment_ids,
                                       label_id=label_id,
                                       input_len=input_len)
                features.append(feature)
                pbar(step=ex_id)
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
        return features

    def create_dataset(self, features):
        # Convert to Tensors and build dataset
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
        dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        return dataset
| [
"1194348056@qq.com"
] | 1194348056@qq.com |
832d3933942ae2d8daff1fd7920625da1b66c86c | 54f068e9cc75e2f8526b84f5d4692e7132ae4e3b | /utils/metrics.py | 29678b7db1c7fb09dcf0cc9b04442db09ca70f41 | [] | no_license | ChendongLi/LightGBM-with-Focal-Loss | 36f9260a4140a69fc4c6dfe5fb06e77db257e962 | edb4fdc003d007c1887482cbf6cd3f0a534a9370 | refs/heads/master | 2022-03-06T18:24:57.335602 | 2019-11-09T19:56:45 | 2019-11-09T19:56:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,209 | py | import numpy as np
import lightgbm as lgb
from sklearn.metrics import f1_score
from scipy.misc import derivative
def sigmoid(x):
    """Map raw scores to (0, 1) via the logistic function."""
    return 1. / (1. + np.exp(-x))
def best_threshold(y_true, pred_proba, proba_range, verbose=False):
    """Find the probability cut-off in *proba_range* that maximises f1_score.

    Kept for reference; not used elsewhere in this repo.

    Parameters:
    -----------
    y_true: numpy.ndarray
        array with the true labels
    pred_proba: numpy.ndarray
        array with the predicted probability
    proba_range: numpy.ndarray
        range of probabilities to explore,
        e.g. np.arange(0.1, 0.9, 0.01)

    Return:
    -----------
    tuple with the optimal threshold and the corresponding f1_score
    """
    scores = []
    for prob in proba_range:
        hard_preds = [int(p > prob) for p in pred_proba]
        current = f1_score(y_true, hard_preds)
        scores.append(current)
        if verbose:
            print("INFO: prob threshold: {}. score :{}".format(round(prob, 3), round(current, 5)))
    # argmax keeps the *first* maximum, matching a left-to-right scan.
    best = np.argmax(scores)
    return (proba_range[best], scores[best])
def focal_loss_lgb(y_pred, dtrain, alpha, gamma):
    """Focal Loss objective for lightgbm.

    Parameters:
    -----------
    y_pred: numpy.ndarray
        array with the raw predictions
    dtrain: lightgbm.Dataset
    alpha, gamma: float
        See original paper https://arxiv.org/pdf/1708.02002.pdf

    Returns the (grad, hess) pair lightgbm expects from a custom
    objective, obtained by numerical differentiation of the loss.
    """
    y_true = dtrain.label
    def loss_at(x):
        t = y_true
        p = 1/(1+np.exp(-x))
        return -( alpha*t + (1-alpha)*(1-t) ) * (( 1 - ( t*p + (1-t)*(1-p)) )**gamma) * ( t*np.log(p) + (1-t)*np.log(1-p) )
    grad = derivative(loss_at, y_pred, n=1, dx=1e-6)
    hess = derivative(loss_at, y_pred, n=2, dx=1e-6)
    return grad, hess
def focal_loss_lgb_eval_error(y_pred, dtrain, alpha, gamma):
"""
Adapation of the Focal Loss for lightgbm to be used as evaluation loss
Parameters:
-----------
y_pred: numpy.ndarray
array with the predictions
dtrain: lightgbm.Dataset
alpha, gamma: float
See original paper https://arxiv.org/pdf/1708.02002.pdf
"""
a,g = alpha, gamma
y_true = dtrain.label
p = 1/(1+np.exp(-y_pred))
loss = -( a*y_true + (1-a)*(1-y_true) ) * (( 1 - ( y_true*p + (1-y_true)*(1-p)) )**g) * ( y_true*np.log(p)+(1-y_true)*np.log(1-p) )
return 'focal_loss', np.mean(loss), False
def lgb_f1_score(preds, lgbDataset):
    """f1 evaluation metric for lightgbm (probability inputs).

    Parameters:
    -----------
    preds: numpy.ndarray
        array with the predicted probabilities
    lgbDataset: lightgbm.Dataset

    Returns the (name, value, is_higher_better) triple expected by
    lightgbm's feval API.
    """
    hard_preds = [int(p > 0.5) for p in preds]
    return 'f1', f1_score(lgbDataset.get_label(), hard_preds), True
def lgb_focal_f1_score(preds, lgbDataset):
    """f1 metric for lightgbm models trained with the focal-loss objective.

    With a custom objective lightgbm returns raw scores, so they are first
    squashed through a sigmoid to obtain probabilities.

    Parameters:
    -----------
    preds: numpy.ndarray
        array with the raw predictions
    lgbDataset: lightgbm.Dataset
    """
    probs = sigmoid(preds)
    hard_preds = [int(p > 0.5) for p in probs]
    return 'f1', f1_score(lgbDataset.get_label(), hard_preds), True
"jrzaurin@gmail.com"
] | jrzaurin@gmail.com |
0995853d407dcabc204161adc3c4ca37a2636203 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03326/s669032235.py | e2f14f49eb1efa395d9d49db4d17b4b77fa894e9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | n,m = map(int, raw_input().split())
r = 0
# Python 2 script: each of the n rows is read as a triple of ints
# (n and m come from the first input line above).
cakes = [map(int, raw_input().split()) for _ in range(n)]
# NOTE(review): presumably this maximises |sum x| + |sum y| + |sum z|
# over any m rows by trying all 2**3 coordinate sign assignments -- confirm.
for b in range(8):
    # Sort ascending by the signed coordinate sum for this bitmask b.
    cakes.sort(key = lambda x: sum([x[i] * (-1 if ((b >> i) & 1) else 1) for i in range(3) ]))
    s = 0
    # Accumulate the m largest signed sums (tail of the sorted list).
    for i in range(n-1, n - 1 - m, -1):
        s += sum([cakes[i][j] * (-1 if ((b >> j) & 1) else +1) for j in range(3)])
    r = max(r, s)
print r | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
96484682bf491548ed5328bef04648f80baf509c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03503/s669355709.py | 49c704585473d519a9db4a93c8490a499f0fba40 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | N = int(input())
Fs = []
# NOTE(review): presumably Fs[i][j] is a 0/1 open-flag over 10 slots -- confirm.
for _ in range(N):
    Fs.append(list(map(int, input().split())))
Ps = []
# NOTE(review): presumably Ps[i][c] is the payoff for c overlapping slots -- confirm.
for _ in range(N):
    Ps.append(list(map(int, input().split())))
def calc(isOpen):
    """Score one 10-slot open/close pattern and keep the best total in ``ans``."""
    global ans
    # The all-closed pattern is invalid and contributes nothing.
    if not any(isOpen[i] for i in range(10)):
        return
    total = 0
    for shop in range(N):
        overlap = sum(1 for slot in range(10) if Fs[shop][slot] and isOpen[slot])
        total += Ps[shop][overlap]
    ans = max(ans, total)
def search(isOpen):
    """Depth-first enumeration of all 2**10 open/close patterns."""
    if len(isOpen) == 10:
        calc(isOpen)
        return
    # Extend with True first, then False, matching the original order.
    for choice in (True, False):
        search(isOpen + [choice])
# Best score over all patterns; start at -inf because totals can be negative.
ans = -float('inf')
search([])
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d17c91b48b0b2e8421bab774ed86dd07adb24bef | 62a212c3d7936c727e09b48d3c10495ea8db12fe | /src/backend/flask_interface/chapter.py | 043ccc6a4e7dd0845ac7e801c52f759896cca129 | [] | no_license | antonpaquin/Homulili | 080a2398e9ee7f19566be3de8a30903ae03a3b9e | 3c56ee5c41d5bf3f86a3325c6117d6795e12cdf2 | refs/heads/master | 2021-09-06T15:19:53.166674 | 2018-02-08T00:21:20 | 2018-02-08T00:21:20 | 110,213,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | import logging
from .common import standard_request
logger = logging.getLogger(__name__)
def create(manga_id, name, sort_key):
    """Create a new chapter row; returns the backend's response."""
    return standard_request(
        model='chapter',
        method='create',
        params={'manga_id': manga_id, 'name': name, 'sort_key': sort_key},
        logger=logger,
    )
def read(chapter_id):
    """Fetch a single chapter by its id."""
    return standard_request(
        model='chapter',
        method='read',
        params={'id': chapter_id},
        logger=logger,
    )
def update(chapter_id, name=None, manga_id=None, sort_key=None):
    """Update a chapter; fields left as None are forwarded unchanged."""
    changes = {
        'id': chapter_id,
        'name': name,
        'manga_id': manga_id,
        'sort_key': sort_key,
    }
    return standard_request(
        model='chapter',
        method='update',
        params=changes,
        logger=logger,
    )
def delete(chapter_id):
    """Delete the chapter with the given id."""
    return standard_request(
        model='chapter',
        method='delete',
        params={'id': chapter_id},
        logger=logger,
    )
def index(manga_id):
    """List the chapters of a manga.

    The backend responds with data shaped like:
    [
        {
            "id": int,
            "name": str,
            "sort_key": int,
        },
    ]
    """
    return standard_request(
        model='chapter',
        method='index',
        params={'manga_id': manga_id},
        logger=logger,
    )
| [
"antonpaquin@gmail.com"
] | antonpaquin@gmail.com |
a9d284d1c29f3355b27a4b6b659c9011c7035a01 | 0466a5dc950f4e89d8696329b89aa50246c7e7e3 | /deepwind-review/fig4_TKE.py | b4f60c186b01b542678dc16c336049133bc6c62c | [] | no_license | HansInM36/ppcode | 00bc94e6177b8110681127514517f277d7a7b07a | e5fe9de8ddf2991f2fe95bde38045ee02bbcfe10 | refs/heads/master | 2023-07-19T03:42:38.667878 | 2021-09-30T22:59:48 | 2021-09-30T22:59:48 | 313,005,222 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,481 | py | import os
import sys
sys.path.append('/scratch/ppcode/standard')
sys.path.append('/scratch/ppcode/standard/palm_std')
sys.path.append('/scratch/ppcode/standard/sowfa_std')
import imp
import palm_data_ext
from palm_data_ext import *
import sowfa_data_ext_L2
from sowfa_data_ext_L2 import *
import numpy as np
import matplotlib.pyplot as plt
""" SOWFA """
prjDir = '/scratch/sowfadata/JOBS'
prjName = 'deepwind'
jobName = 'gs10_refined'
ppDir_0 = '/scratch/sowfadata/pp/' + prjName + '/' + jobName
tSeq_0, zSeq_0, rsvSeq_0, sgsSeq_0, totSeq_0 = TKE_sowfa(ppDir_0, ((0,0,0),30), 0)
rsvSeq_0 = TKE_av_sowfa(rsvSeq_0, tSeq_0, zSeq_0.size, (3600.0,151200.0))
sgsSeq_0 = TKE_av_sowfa(sgsSeq_0, tSeq_0, zSeq_0.size, (3600.0,151200.0))
totSeq_0 = TKE_av_sowfa(totSeq_0, tSeq_0, zSeq_0.size, (3600.0,151200.0))
""" PALM """
prjDir = '/scratch/palmdata/JOBS/Deepwind'
jobName = 'deepwind_gs5'
dir = prjDir + '/' + jobName
tSeq_4, zSeq_4, rsvSeq_4, sgsSeq_4, totSeq_4 = TKE_palm(dir, jobName, ['.010','.011'])
rsvSeq_4 = rsvSeq_4[-1]
sgsSeq_4 = sgsSeq_4[-1]
totSeq_4 = totSeq_4[-1]
""" TKE group plot """
zi = 700
fig = plt.figure()
fig.set_figwidth(6)
fig.set_figheight(6)
rNum, cNum = (1,2)
axs = fig.subplots(nrows=rNum, ncols=cNum)
axs[0].plot(rsvSeq_0[0::3], zSeq_0[0::3]/zi, label='sowfa-rsv', marker='', markersize=1, linestyle='--', linewidth=1.0, color='r')
axs[0].plot(sgsSeq_0[0::3], zSeq_0[0::3]/zi, label='sowfa-sgs', marker='', markersize=1, linestyle=':', linewidth=1.0, color='r')
axs[0].plot(totSeq_0[0::3], zSeq_0[0::3]/zi, label='sowfa-tot', marker='', markersize=1, linestyle='-', linewidth=1.0, color='r')
axs[0].plot(rsvSeq_4, zSeq_4/zi, label='palm-rsv', marker='', markersize=1, linestyle='--', linewidth=1.0, color='b')
axs[0].plot(sgsSeq_4, zSeq_4/zi, label='palm-sgs', marker='', markersize=1, linestyle=':', linewidth=1.0, color='b')
axs[0].plot(totSeq_4, zSeq_4/zi, label='palm-tot', marker='', markersize=1, linestyle='-', linewidth=1.0, color='b')
#axs[0].set_xlim(0.0,0.5)
axs[0].set_ylim(0.0,1.0)
#axs[0].set_xticklabels([0.0,0.2,0.4],fontsize=20)
for tick in axs[0].xaxis.get_major_ticks():
tick.label.set_fontsize(20)
axs[0].set_yticklabels([0.0,0.2,0.4,0.6,0.8,1.0],fontsize=20)
axs[0].set_xlabel(r'$\mathrm{e}$ $(\mathrm{m^2/s^2})$', fontsize=20)
axs[0].set_ylabel(r'$\mathrm{z_i}$', fontsize=20)
axs[0].grid()
# axs[0].legend(loc='upper right', bbox_to_anchor=(0.9,0.9), ncol=1, mode='None', borderaxespad=0, fontsize=12)
axs[1].plot(funcs.flt_seq(rsvSeq_0[0::3]/totSeq_0[0::3]*100,0), zSeq_0[0::3]/zi, label='sowfa', marker='', markersize=1, linestyle='-', linewidth=1.0, color='r')
axs[1].plot(rsvSeq_4/totSeq_4*100, zSeq_4/zi, label='palm', marker='', markersize=1, linestyle='-', linewidth=1.0, color='b')
axs[1].set_xlim(60.0,100.0)
axs[1].set_ylim(0.0,1.0); axs[1].set_yticklabels([])
axs[1].set_xticklabels([60,70,80,90,100],fontsize=20)
axs[1].set_xlabel(r'$\mathrm{e_{rsv}/e_{tot}}$ (%)', fontsize=20)
axs[1].grid()
# axs[1].legend(loc='upper left', bbox_to_anchor=(0.1,0.9), ncol=1, mode='None', borderaxespad=0, fontsize=12)
handles, labels = axs[0].get_legend_handles_labels()
lgdord = [0,3,1,4,2,5]
fig.legend([handles[i] for i in lgdord], [labels[i] for i in lgdord], loc='upper center', bbox_to_anchor=(0.5,0.86), ncol=1, mode='None', borderaxespad=0, fontsize=18)
saveDir = '/scratch/projects/deepwind/photo/review'
saveName = 'fig4_TKE.png'
plt.savefig(saveDir + '/' + saveName, bbox_inches='tight')
plt.show() | [
"xni001@gfi3104118.klientdrift.uib.no"
] | xni001@gfi3104118.klientdrift.uib.no |
3d61802c5666a8f8f7ba46bfd6447c11fc437c7f | 8b5fc00f5ec726a6f7f95806bfef0836341b925c | /posts/views.py | 7d0fcca802622e0a1f619d4fcf98a6fdd9b597b1 | [] | no_license | TareqMonwer/drf-blog-api | ae09d6dd484600e53ec109aef44203e353bbe5e9 | 2f5feb5c6540937589865827126052d5e3df2302 | refs/heads/master | 2022-12-21T03:23:11.252508 | 2021-06-04T06:50:42 | 2021-06-04T06:50:42 | 231,928,429 | 2 | 0 | null | 2022-12-09T05:20:50 | 2020-01-05T14:20:10 | Python | UTF-8 | Python | false | false | 1,256 | py | from django.contrib.auth import get_user_model
from rest_framework import generics, permissions
from rest_framework import viewsets
from .serializers import PostSerializer, UserSerializer
from .permissions import IsAuthorOrReadOnly
from .models import Post
class PostViewsets(viewsets.ModelViewSet):
    """CRUD API endpoints for Post; writes restricted to the post's author."""
    permission_classes = (IsAuthorOrReadOnly,)  # read for anyone, write for author
    queryset = Post.objects.all()
    serializer_class = PostSerializer
class UserViewsets(viewsets.ModelViewSet):
    """CRUD API endpoints for the active user model."""
    queryset = get_user_model().objects.all()  # respects a custom AUTH_USER_MODEL
    serializer_class = UserSerializer
# # THESE VIEWS ARE REPLACED BY VIEWSETS DESCRIBED ABOVE
# class PostList(generics.ListCreateAPIView):
# queryset = Post.objects.all()
# serializer_class = PostSerializer
# class PostDetail(generics.RetrieveUpdateDestroyAPIView):
# permission_classes = (IsAuthorOrReadOnly, )
# queryset = Post.objects.all()
# serializer_class = PostSerializer
# class UserList(generics.ListCreateAPIView):
# queryset = get_user_model().objects.all()
# serializer_class = UserSerializer
# class UserDetail(generics.RetrieveUpdateDestroyAPIView):
# permission_classes = (permissions.IsAdminUser, )
# queryset = get_user_model().objects.all()
# serializer_class = UserSerializer | [
"tareqmonwer137@gmail.com"
] | tareqmonwer137@gmail.com |
f4821de951254d90c2a5a3596e891a557b05b01c | d0b4aebfde0c268df3456f4783cb3b8217a5fc4a | /trailingZeros.py | 85a8788ba53153aff8aea0c4e8a7d0f7defebc92 | [] | no_license | kns94/algorithms_practice | a42adf3c55383df8d41e7862caef7437fd6207ae | 6dfdffc075488af717b4e8d486bc3a9222f2721c | refs/heads/master | 2020-12-24T10:52:06.448310 | 2017-01-18T06:31:37 | 2017-01-18T06:31:37 | 73,129,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | import math
class Solution(object):
    def trailingZeroes(self, n):
        """Return the number of trailing zeros in n! (factorial of n).

        Trailing zeros come from factors of 10 = 2 * 5, and 5 is the
        scarcer factor, so the answer is sum(n // 5**k for k >= 1).

        :type n: int
        :rtype: int
        """
        # Repeated integer division replaces the previous
        # `int(n / pow(5, i))`, which went through float division and
        # loses precision for very large n; it also avoids recomputing
        # pow(5, i) on every iteration and needs no n <= 1 special case.
        count = 0
        while n >= 5:
            n //= 5
            count += n
        return count
import sys
print Solution().trailingZeroes(int(sys.argv[1])) | [
"kns971@gmail.com"
] | kns971@gmail.com |
4829a2a9fcc7d02ba61654e17872292ce81df8ac | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_11_01/aio/_configuration.py | 2c331f35d733d1b680d8fd78368d8f4befc0d50c | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 3,324 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ComputeManagementClientConfiguration(Configuration):
    """Configuration for ComputeManagementClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        # Fail fast: both values are mandatory for every service call.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        super(ComputeManagementClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = "2019-11-01"  # service API version pinned by this package
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-compute/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        # Each pipeline policy can be overridden via kwargs; otherwise the
        # azure-core default implementation is constructed.
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        # Build a bearer-token policy only when the caller did not supply one.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
22c6b2d0385de20ddad130b398ff3e6a01df299d | 6af96cf3c590a5418e87873e892fe704698c8ef8 | /70_defaultdict.py | 281ba6267c4a2054f8881edf257e87b39e5ec63f | [] | no_license | vikasjoshis001/Python-Course | f228ed362160831ee00c8498e679186463887982 | 40efa480b3b39b3abd1b2a0c6bad0af3db2ce205 | refs/heads/master | 2023-05-03T15:37:20.841229 | 2021-05-21T14:41:18 | 2021-05-21T14:41:18 | 283,190,082 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | from collections import defaultdict
# Read "n m" from the first input line.
integer = input()
z = integer.split(" ")
numbers = []
for i in range(len(z)):
    numbers.append(int(z[i]))
n = numbers[0]
m = numbers[1]
val = 0
letters1 = []
letters2 = []
# n reference strings, then m query strings.
for i in range(n):
    letter1 = input()
    letters1.append(letter1)
for j in range(m):
    letter2 = input()
    letters2.append(letter2)
# For each query, print the 1-based position of every matching entry,
# or -1 when it never occurs (val counts the misses).
for i in range(len(letters2)):
    val = 0
    for j in range(len(letters1)):
        if letters2[i] == letters1[j]:
            print(j+1,end=" ")
        else:
            val += 1
    if (val == len(letters1)):
        print(-1,end=" ")
print("\r") | [
"vikasjoshis001@gmail.com"
] | vikasjoshis001@gmail.com |
a64a9b4eb6ae0419fb6af4b76c697c99733b6cf5 | 51d7e8c09793b50d45731bd5ab9b531b525cf6db | /src/garage/replay_buffer/her_replay_buffer.py | f791f14c354f608206c914eb901b7dbd7924e91a | [
"MIT"
] | permissive | fangqyi/garage | 454247849a6a3f547557b3fac3787ba9eeb0391f | ddafba385ef005f46f913ab352f9638760e5b412 | refs/heads/master | 2023-02-25T00:43:18.903328 | 2021-01-26T01:52:15 | 2021-01-26T01:52:15 | 267,667,220 | 0 | 0 | MIT | 2020-05-28T18:35:08 | 2020-05-28T18:35:07 | null | UTF-8 | Python | false | false | 7,276 | py | """This module implements a Hindsight Experience Replay (HER).
See: https://arxiv.org/abs/1707.01495.
"""
import inspect
import numpy as np
from garage.replay_buffer.replay_buffer import ReplayBuffer
def make_her_sample(replay_k, reward_fun):
    """Generate a transition sampler for HER ReplayBuffer.

    Args:
        replay_k (float): Ratio between HER replays and regular replays
        reward_fun (callable): Function to re-compute the reward with
            substituted goals

    Returns:
        callable: A function that returns sample transitions for HER.

    """
    # Probability that a sampled transition gets its goal relabeled with a
    # future achieved goal ("future" strategy): replay_k HER replays for
    # every regular replay.
    future_p = 1 - (1. / (1 + replay_k))

    def _her_sample_transitions(episode_batch, sample_batch_size):
        """Generate a dictionary of transitions.

        Args:
            episode_batch (dict): Original transitions which
                transitions[key] has shape :math:`(N, T, S^*)`.
            sample_batch_size (int): Batch size per sample.

        Returns:
            dict[numpy.ndarray]: Transitions.

        """
        # Select which episodes to use
        time_horizon = episode_batch['action'].shape[1]
        rollout_batch_size = episode_batch['action'].shape[0]
        episode_idxs = np.random.randint(rollout_batch_size,
                                         size=sample_batch_size)
        # Select time steps to use
        t_samples = np.random.randint(time_horizon, size=sample_batch_size)
        # Gather one (episode, timestep) transition per sampled index.
        transitions = {
            key: episode_batch[key][episode_idxs, t_samples]
            for key in episode_batch.keys()
        }

        # Positions (within the sample) whose goal will be relabeled.
        her_idxs = np.where(
            np.random.uniform(size=sample_batch_size) < future_p)

        # Pick a random future timestep of the same episode; its achieved
        # goal becomes the substituted goal.
        future_offset = np.random.uniform(
            size=sample_batch_size) * (time_horizon - t_samples)
        future_offset = future_offset.astype(int)
        future_t = (t_samples + future_offset)[her_idxs]

        future_ag = episode_batch['achieved_goal'][episode_idxs[her_idxs],
                                                   future_t]
        transitions['goal'][her_idxs] = future_ag
        achieved_goals = episode_batch['achieved_goal'][episode_idxs[her_idxs],
                                                        t_samples[her_idxs]]
        transitions['achieved_goal'][her_idxs] = achieved_goals

        # Re-compute reward since we may have substituted the goal.
        # NOTE(review): all but the last parameter of reward_fun are matched
        # positionally to [next_achieved_goal, goal]; the last one is assumed
        # to be an ``info`` argument -- confirm against the reward function.
        reward_params_keys = inspect.signature(reward_fun).parameters.keys()
        reward_params = {
            rk: transitions[k]
            for k, rk in zip(['next_achieved_goal', 'goal'],
                             list(reward_params_keys)[:-1])
        }
        reward_params['info'] = {}
        transitions['reward'] = reward_fun(**reward_params)

        # Reshape every entry back to (sample_batch_size, S^*).
        transitions = {
            k: transitions[k].reshape(sample_batch_size,
                                      *transitions[k].shape[1:])
            for k in transitions.keys()
        }

        # Concatenate goal information onto the observation vectors so the
        # policy sees goal-conditioned inputs.
        goals = transitions['goal']
        next_inputs = np.concatenate((transitions['next_observation'], goals,
                                      transitions['achieved_goal']),
                                     axis=-1)
        inputs = np.concatenate(
            (transitions['observation'], goals, transitions['achieved_goal']),
            axis=-1)
        transitions['observation'] = inputs
        transitions['next_observation'] = next_inputs

        assert transitions['action'].shape[0] == sample_batch_size

        return transitions

    return _her_sample_transitions
class HerReplayBuffer(ReplayBuffer):
    """Replay buffer for HER (Hindsight Experience Replay).

    It constructs hindsight examples using future strategy.

    Args:
        replay_k (float): Ratio between HER replays and regular replays
        reward_fun (callable): Function to re-compute the reward with
            substituted goals
        env_spec (garage.envs.EnvSpec): Environment specification.
        size_in_transitions (int): total size of transitions in the buffer
        time_horizon (int): time horizon of rollout.

    """

    def __init__(self, replay_k, reward_fun, env_spec, size_in_transitions,
                 time_horizon):
        self._env_spec = env_spec
        # HER-aware sampler closure; not picklable, so it is dropped in
        # __getstate__ and rebuilt in __setstate__.
        self._sample_transitions = make_her_sample(replay_k, reward_fun)
        # Kept only so the sampler can be reconstructed after unpickling.
        self._replay_k = replay_k
        self._reward_fun = reward_fun
        super().__init__(env_spec, size_in_transitions, time_horizon)

    def sample(self, batch_size):
        """Sample a transition of batch_size.

        Args:
            batch_size (int): Batch size to sample.

        Return:
            dict[numpy.ndarray]: Transitions which transitions[key] has the
                shape of :math:`(N, S^*)`. Keys include [`observation`,
                `action`, `goal`, `achieved_goal`, `terminal`,
                `next_observation`, `next_achieved_goal` and `reward`].

        """
        # Restrict sampling to the filled portion of the buffer.
        buffer = {}
        for key in self._buffer:
            buffer[key] = self._buffer[key][:self._current_size]
        transitions = self._sample_transitions(buffer, batch_size)

        # Sanity check: the sampler must return every stored key plus the
        # derived reward / next_* entries.
        for key in (['reward', 'next_observation', 'next_achieved_goal'] +
                    list(self._buffer.keys())):
            assert key in transitions, 'key %s missing from transitions' % key

        return transitions

    def __getstate__(self):
        """Object.__getstate__.

        Returns:
            dict: The state to be pickled for the instance.

        """
        # The sampler closure cannot be pickled; drop it from the state.
        new_dict = self.__dict__.copy()
        del new_dict['_sample_transitions']
        return new_dict

    def __setstate__(self, state):
        """Object.__setstate__.

        Args:
            state (dict): Unpickled state.

        """
        self.__dict__ = state
        # Rebuild the sampler from the pickled configuration.
        replay_k = state['_replay_k']
        reward_fun = state['_reward_fun']
        self._sample_transitions = make_her_sample(replay_k, reward_fun)

    def add_transitions(self, **kwargs):
        """Add multiple transitions into the replay buffer.

        A transition contains one or multiple entries, e.g.
        observation, action, reward, terminal and next_observation.
        The same entry of all the transitions are stacked, e.g.
        {'observation': [obs1, obs2, obs3]} where obs1 is one
        numpy.ndarray observation from the environment.

        Args:
            kwargs (dict(str, [numpy.ndarray])): Dictionary that holds
                the transitions.

        """
        # Goal-conditioned observations arrive as dicts; split them into
        # observation / desired_goal / achieved_goal components before
        # delegating to the base buffer.
        obses = kwargs['observation']
        obs = [obs['observation'] for obs in obses]
        d_g = [obs['desired_goal'] for obs in obses]
        a_g = [obs['achieved_goal'] for obs in obses]
        next_obses = kwargs['next_observation']

        super().add_transitions(
            observation=obs,
            action=kwargs['action'],
            goal=d_g,
            achieved_goal=a_g,
            terminal=kwargs['terminal'],
            next_observation=[
                next_obs['observation'] for next_obs in next_obses
            ],
            next_achieved_goal=[
                next_obs['achieved_goal'] for next_obs in next_obses
            ],
        )
| [
"qiaoyi.fang@duke.edu"
] | qiaoyi.fang@duke.edu |
6b930c08f6dc07b90cf59fb9cb1ac9a3830f6e29 | 209c876b1e248fd67bd156a137d961a6610f93c7 | /python/paddle/fluid/tests/unittests/hybrid_parallel_pp_alexnet.py | 2b85788ae56c620704877df1e7e4b190686738d1 | [
"Apache-2.0"
] | permissive | Qengineering/Paddle | 36e0dba37d29146ebef4fba869490ecedbf4294e | 591456c69b76ee96d04b7d15dca6bb8080301f21 | refs/heads/develop | 2023-01-24T12:40:04.551345 | 2022-10-06T10:30:56 | 2022-10-06T10:30:56 | 544,837,444 | 0 | 0 | Apache-2.0 | 2022-10-03T10:12:54 | 2022-10-03T10:12:54 | null | UTF-8 | Python | false | false | 4,361 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import numpy as np
import random
import paddle
import paddle.distributed as dist
import paddle.distributed.fleet as fleet
from hybrid_parallel_pp_layer import AlexNetPipeDesc, AlexNet
def set_random_seed(seed, dp_id, rank_id):
    """Set random seed for reproducibility.

    Args:
        seed (int): base seed.
        dp_id (int): data-parallel rank; added to the seed so each
            data-parallel worker draws a distinct stream.
        rank_id (int): global rank; currently unused in this function.
    """
    random.seed(seed)
    np.random.seed(seed + dp_id)
    paddle.seed(seed + dp_id)
# A global batch of 4 is split into micro-batches of 2: the pipeline runs
# batch_size // micro_batch_size accumulation steps per optimizer step
# (see strategy.pipeline_configs in setUp).
batch_size = 4
micro_batch_size = 2
class TestDistPPTraning(unittest.TestCase):
    """Checks pipeline-parallel training against plain single-model training."""

    def setUp(self):
        # Two pipeline stages, no data or tensor (model) parallelism.
        strategy = fleet.DistributedStrategy()
        self.model_parallel_size = 1
        self.data_parallel_size = 1
        self.pipeline_parallel_size = 2
        strategy.hybrid_configs = {
            "dp_degree": self.data_parallel_size,
            "mp_degree": self.model_parallel_size,
            "pp_degree": self.pipeline_parallel_size,
        }
        strategy.pipeline_configs = {
            "accumulate_steps": batch_size // micro_batch_size,
            "micro_batch_size": micro_batch_size
        }
        fleet.init(is_collective=True, strategy=strategy)

    def build_optimizer(self, model):
        # LR schedule: 0.001 for the first two steps, 0.002 afterwards.
        scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[2],
                                                       values=[0.001, 0.002],
                                                       verbose=True)
        optimizer = paddle.optimizer.SGD(learning_rate=scheduler,
                                         parameters=model.parameters())
        return scheduler, optimizer

    def test_pp_model(self):
        hcg = fleet.get_hybrid_communicate_group()
        word_size = hcg.get_model_parallel_world_size()
        dp_id = hcg.get_data_parallel_rank()
        pp_id = hcg.get_stage_id()
        rank_id = dist.get_rank()
        set_random_seed(1024, dp_id, rank_id)

        # construct model a: the non-pipelined reference model.
        model_a = AlexNet(10)
        scheduler_a, optimizer_a = self.build_optimizer(model_a)

        param_len = len(model_a.parameters())

        parameters = []
        for param in model_a.parameters():
            parameters.append(param.numpy())

        # construct model b: the same network expressed as a pipeline.
        model_b = AlexNetPipeDesc(num_stages=self.pipeline_parallel_size)
        scheduler_b, optimizer_b = self.build_optimizer(model_b)
        model_b = fleet.distributed_model(model_b)
        optimizer_b = fleet.distributed_optimizer(optimizer_b)

        # Copy the reference weights into this stage's slice of the pipeline
        # so both models start identically (the indexing assumes each of the
        # two stages owns param_len // 2 consecutive parameters).
        for idx, param in enumerate(model_b.parameters()):
            param.set_value(parameters[idx + pp_id * (param_len // 2)])

        # construct reader
        train_reader = paddle.batch(paddle.dataset.mnist.train(),
                                    batch_size=batch_size,
                                    drop_last=True)

        for step_id, data in enumerate(train_reader()):
            # MNIST samples -> (batch, 1, 28, 28) float inputs, int64 labels.
            x_data = np.array([x[0] for x in data]).astype('float32').reshape(
                batch_size, 1, 28, 28)
            y_data = np.array([x[1] for x in data
                               ]).astype('int64').reshape(batch_size, 1)
            img = paddle.to_tensor(x_data)
            label = paddle.to_tensor(y_data)
            img.stop_gradient = True
            label.stop_gradient = True

            # Only the first 5 steps are verified.
            if step_id >= 5:
                return True

            # Reference (non-pipelined) update.
            loss_a = model_a(img, label)
            loss_a.backward()
            optimizer_a.step()
            optimizer_a.clear_grad()
            scheduler_a.step()

            # Pipelined update.
            loss_b = model_b.train_batch([img, label], optimizer_b, scheduler_b)

            print("loss: ", loss_a.numpy(), loss_b.numpy())
            # Both training paths must yield (nearly) identical losses.
            np.testing.assert_allclose(loss_a.numpy(),
                                       loss_b.numpy(),
                                       rtol=5e-5)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | Qengineering.noreply@github.com |
de88cd83e3b8a51b96ce1b2e81d3970b1a7214e0 | c4430be891d7dcf2e0239daef571aa11e6b122d9 | /first_project/myvenv/Scripts/django-admin.py | 971d8096b9930fc04492a4475e82de237994acd8 | [] | no_license | dimka1993kh/Dj_HW_1 | c7c2965445a3aedc7d8ef0297e021a3c13993f4c | abe871fd416d8756040ba4d0997ed9b912997488 | refs/heads/master | 2023-04-05T08:08:23.299196 | 2021-04-04T14:38:32 | 2021-04-04T14:38:32 | 354,566,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | #!c:\users\khmel\desktop\learn python\django\dj_hw_1\first-project\first_project\myvenv\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings

from django.core import management

try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # Django >= 4.0 no longer ships the warning class: the legacy script was
    # removed upstream, so fail loudly with migration instructions.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )

if __name__ == "__main__":
    # Still on Django 3.x: warn, then delegate to the real entry point.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"dimka1993kh@gmail.com"
] | dimka1993kh@gmail.com |
207f4756d70c535fbb750eda3d45d712a85888c4 | 61ed20e6b48b6b1eeadb81a54fbb7b41422b0a45 | /Paramable.py | db51399142ff44247f5f82fc4d713574d8eafef0 | [] | no_license | Sandy4321/CPT-Plus-Python | 2d57e16549c4b6c95018985a62242f8291bb6b3b | a9b591850f87265d9914dad01666e400b3c111bd | refs/heads/main | 2023-04-26T05:26:45.864688 | 2021-05-28T08:12:26 | 2021-05-28T08:12:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | class Paramable():
parameters= {}
    def __init__(self,Parameters= None):
        # Store the parameter mapping (name -> value); may be None.
        self.parameters= Parameters
    def setParameters(self,Parameters):
        # Replace the entire parameter mapping.
        self.parameters= Parameters
def paramDouble(self,name):
value= self.parameters.get(name)
return self.parameters.get(name) if value is not None else None
def paramDoubleOrDefault(self,paramName,defaultValue):
param= self.paramDouble(paramName)
return param if param is not None else defaultValue
def paramInt(self,name):
value= self.parameters.get(name)
return self.parameters.get(name) if value is not None else None
def paramIntOrDefault(self,paramName,defaultValue):
param= self.paramInt(paramName)
return param if param is not None else defaultValue
def paramFloat(self,name):
value= self.parameters.get(name)
return self.parameters.get(name) if value is not None else None
def paramFloatOrDefault(self,paramName,defaultValue):
param= self.paramFloat(paramName)
return param if param is not None else defaultValue
def paramBool(self,name):
value= self.parameters.get(name)
return self.parameters.get(name) if value is not None else None
def paramBoolOrDefault(self,paramName,defaultValue):
param= self.paramBool(paramName)
return param if param is not None else defaultValue
| [
"noreply@github.com"
] | Sandy4321.noreply@github.com |
e65f8759871d46b0463a8e7457ec37b01d0a83f3 | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /atcoder/arc/arc041/b.py | f8131fe642418ae62dd1e8cb36ea5b96495ceec6 | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | dxy = zip([1, 0, -1, 0], [0, 1, 0, -1])
N, M = map(int, raw_input().split())
b = [map(int, list(raw_input())) for i in xrange(N)]
a = [[0] * M for i in xrange(N)]
k = 1
for d in xrange(N / 2 + 1):
for y in [d, N - 1 - d]:
for x in xrange(1, M - 1):
if b[y][x] != 0:
a[y + k][x] += b[y][x]
tmp = b[y][x]
for dx, dy in dxy:
b[y + k + dy][x + dx] -= tmp
k *= -1
for x in [0, M - 1]:
for y in xrange(1, N - 1):
if b[y][x] != 0:
a[y][x + k] += b[y][x]
for line in a:
print "".join(map(str, line))
| [
"roiti46@gmail.com"
] | roiti46@gmail.com |
46776886973da6232431438c8a45777e116011fd | ef1d38cfef63f22e149d6c9dd14e98955693c50d | /webhook/protos/pogoprotos/networking/requests/social/register_push_notification_message_pb2.py | 1e4e24b2159c6c9ed0aa4e88909c80e38daba977 | [] | no_license | Kneckter/WebhookListener | 4c186d9012fd6af69453d9d51ae33a38aa19b5fd | ea4ff29b66d6abf21cc1424ed976af76c3da5511 | refs/heads/master | 2022-10-09T04:26:33.466789 | 2019-11-24T17:30:59 | 2019-11-24T17:30:59 | 193,372,117 | 2 | 0 | null | 2022-09-23T22:26:10 | 2019-06-23T16:39:34 | Python | UTF-8 | Python | false | true | 7,374 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/social/register_push_notification_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/social/register_push_notification_message.proto',
package='pogoprotos.networking.requests.social',
syntax='proto3',
serialized_pb=_b('\nNpogoprotos/networking/requests/social/register_push_notification_message.proto\x12%pogoprotos.networking.requests.social\"\xe9\x02\n\x1fRegisterPushNotificationMessage\x12\x62\n\tapn_token\x18\x01 \x01(\x0b\x32O.pogoprotos.networking.requests.social.RegisterPushNotificationMessage.ApnToken\x12\x62\n\tgcm_token\x18\x02 \x01(\x0b\x32O.pogoprotos.networking.requests.social.RegisterPushNotificationMessage.GcmToken\x1aY\n\x08\x41pnToken\x12\x17\n\x0fregistration_id\x18\x01 \x01(\t\x12\x19\n\x11\x62undle_identifier\x18\x02 \x01(\t\x12\x19\n\x11payload_byte_size\x18\x03 \x01(\x05\x1a#\n\x08GcmToken\x12\x17\n\x0fregistration_id\x18\x01 \x01(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_REGISTERPUSHNOTIFICATIONMESSAGE_APNTOKEN = _descriptor.Descriptor(
name='ApnToken',
full_name='pogoprotos.networking.requests.social.RegisterPushNotificationMessage.ApnToken',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='registration_id', full_name='pogoprotos.networking.requests.social.RegisterPushNotificationMessage.ApnToken.registration_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bundle_identifier', full_name='pogoprotos.networking.requests.social.RegisterPushNotificationMessage.ApnToken.bundle_identifier', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='payload_byte_size', full_name='pogoprotos.networking.requests.social.RegisterPushNotificationMessage.ApnToken.payload_byte_size', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=357,
serialized_end=446,
)
_REGISTERPUSHNOTIFICATIONMESSAGE_GCMTOKEN = _descriptor.Descriptor(
name='GcmToken',
full_name='pogoprotos.networking.requests.social.RegisterPushNotificationMessage.GcmToken',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='registration_id', full_name='pogoprotos.networking.requests.social.RegisterPushNotificationMessage.GcmToken.registration_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=448,
serialized_end=483,
)
_REGISTERPUSHNOTIFICATIONMESSAGE = _descriptor.Descriptor(
name='RegisterPushNotificationMessage',
full_name='pogoprotos.networking.requests.social.RegisterPushNotificationMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='apn_token', full_name='pogoprotos.networking.requests.social.RegisterPushNotificationMessage.apn_token', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gcm_token', full_name='pogoprotos.networking.requests.social.RegisterPushNotificationMessage.gcm_token', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_REGISTERPUSHNOTIFICATIONMESSAGE_APNTOKEN, _REGISTERPUSHNOTIFICATIONMESSAGE_GCMTOKEN, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=122,
serialized_end=483,
)
_REGISTERPUSHNOTIFICATIONMESSAGE_APNTOKEN.containing_type = _REGISTERPUSHNOTIFICATIONMESSAGE
_REGISTERPUSHNOTIFICATIONMESSAGE_GCMTOKEN.containing_type = _REGISTERPUSHNOTIFICATIONMESSAGE
_REGISTERPUSHNOTIFICATIONMESSAGE.fields_by_name['apn_token'].message_type = _REGISTERPUSHNOTIFICATIONMESSAGE_APNTOKEN
_REGISTERPUSHNOTIFICATIONMESSAGE.fields_by_name['gcm_token'].message_type = _REGISTERPUSHNOTIFICATIONMESSAGE_GCMTOKEN
DESCRIPTOR.message_types_by_name['RegisterPushNotificationMessage'] = _REGISTERPUSHNOTIFICATIONMESSAGE
RegisterPushNotificationMessage = _reflection.GeneratedProtocolMessageType('RegisterPushNotificationMessage', (_message.Message,), dict(
ApnToken = _reflection.GeneratedProtocolMessageType('ApnToken', (_message.Message,), dict(
DESCRIPTOR = _REGISTERPUSHNOTIFICATIONMESSAGE_APNTOKEN,
__module__ = 'pogoprotos.networking.requests.social.register_push_notification_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.social.RegisterPushNotificationMessage.ApnToken)
))
,
GcmToken = _reflection.GeneratedProtocolMessageType('GcmToken', (_message.Message,), dict(
DESCRIPTOR = _REGISTERPUSHNOTIFICATIONMESSAGE_GCMTOKEN,
__module__ = 'pogoprotos.networking.requests.social.register_push_notification_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.social.RegisterPushNotificationMessage.GcmToken)
))
,
DESCRIPTOR = _REGISTERPUSHNOTIFICATIONMESSAGE,
__module__ = 'pogoprotos.networking.requests.social.register_push_notification_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.social.RegisterPushNotificationMessage)
))
_sym_db.RegisterMessage(RegisterPushNotificationMessage)
_sym_db.RegisterMessage(RegisterPushNotificationMessage.ApnToken)
_sym_db.RegisterMessage(RegisterPushNotificationMessage.GcmToken)
# @@protoc_insertion_point(module_scope)
| [
"kasmar@gitlab.com"
] | kasmar@gitlab.com |
349e1fc75603fb2a77c4a4ae73ce7c02cb283bba | fdca7a438cd891ba306c495adfc864155290ef59 | /correlation.py | a125394b05dd75db5b34746dae87164f8b445be1 | [] | no_license | libowei1213/SportsNews | 974487d9f8fccf53058865e01cd2bff9b48e9bb6 | b803521a2ca74e4ffe5e5b929ac40df6d34ab808 | refs/heads/master | 2020-06-10T01:22:37.085751 | 2016-12-26T05:22:30 | 2016-12-26T05:22:30 | 76,117,308 | 1 | 0 | null | 2016-12-10T14:26:29 | 2016-12-10T14:26:29 | null | UTF-8 | Python | false | false | 618 | py | # coding=utf=8
import json
from gensim.models import Word2Vec
import jieba
import pickle
import time
# Load heavyweight artifacts once at import time: the pretrained word2vec
# vectors and the precomputed document-similarity table.
# NOTE(review): the file handle passed to pickle.load is never closed --
# consider a with-block.
word2vecModel = Word2Vec.load_word2vec_format("word2vec.model", binary=True)
docSimilarDict = pickle.load(open("doc_similar_dict.bin", "rb"))
# The five most similar words
def getSimilarWords(query):
    """Return the 5 words most similar (per word2vec) to the query terms.

    Terms absent from the model vocabulary are ignored; if no terms remain,
    an empty list is returned.
    """
    in_vocab = [term for term in query if term in word2vecModel]
    if not in_vocab:
        return []
    ranked = word2vecModel.most_similar(positive=in_vocab, topn=5)
    return [pair[0] for pair in ranked]
def getSimilarDocs(docId):
    # Look up the precomputed similar-document list for this id; raises
    # KeyError when docId is not in the table.
    return docSimilarDict[docId]
| [
"libowei123123@qq.com"
] | libowei123123@qq.com |
fba5abb5537747e7cc126ea07b763f6364349fb2 | 64bf21e9b4ca104557d05dc90a70e9fc3c3544a4 | /tests/journal.api/error_notes.py | 50feb7eb121fc99ee678a8fa0d7ab561c62092d7 | [
"BSD-3-Clause"
] | permissive | pyre/pyre | e6341a96a532dac03f5710a046c3ebbb79c26395 | d741c44ffb3e9e1f726bf492202ac8738bb4aa1c | refs/heads/main | 2023-08-08T15:20:30.721308 | 2023-07-20T07:51:29 | 2023-07-20T07:51:29 | 59,451,598 | 27 | 13 | BSD-3-Clause | 2023-07-02T07:14:50 | 2016-05-23T04:17:24 | Python | UTF-8 | Python | false | false | 1,033 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2023 all rights reserved
def test():
    """
    Verify access to the channel metadata
    """
    # access the journal package
    import journal

    # make an error channel to exercise
    channel = journal.error("test.channel")
    # get its metadata table
    notes = channel.notes
    # adjust the application name
    notes["application"] = "error_notes"
    # add an entry of our own
    notes["author"] = "michael"

    # make sure the adjustments stick by asking for the notes once again; this step is
    # non-trivial: if support is provided by the C++ library, it ensures that the notes are
    # mutable rather than a throwaway copy
    notes = channel.notes
    # and comparing against expectations: our custom entries survived
    assert notes["application"] == "error_notes"
    assert notes["author"] == "michael"
    # and the entries supplied by the channel itself are intact
    assert notes["channel"] == "test.channel"
    assert notes["severity"] == "error"

    # all done
    return


# main
if __name__ == "__main__":
    # run the test
    test()


# end of file
| [
"michael.aivazis@para-sim.com"
] | michael.aivazis@para-sim.com |
39d31965ec76714a376a7a0cbb38aed5333fe64b | 114c1f7ceff04e00591f46eeb0a2eb387ac65710 | /g4g/ALGO/Searching/Coding_Problems/19_kth_smallest_element_in_row-wise_col-wise_sorted_2D_array.py | d937b87c8f1d5d3cabcec04d1e613b21de61577b | [] | no_license | sauravgsh16/DataStructures_Algorithms | 0783a5e6dd00817ac0b6f2b856ad8d82339a767d | d3133f026f972f28bd038fcee9f65784f5d3ea8b | refs/heads/master | 2020-04-23T03:00:29.713877 | 2019-11-25T10:52:33 | 2019-11-25T10:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,650 | py | ''' Kth smallest element in a row-wise and column-wise sorted 2D array '''
'''
Algorithm:
1) Build a min heap of elements from first row. A heap entry also stores
row number and column number.
2) Do following k times.
a) Get minimum element (or root) from min heap.
b) Find row number and column number of the minimum element.
c) Replace root with the next element from same column and min-heapify
the root.
3) Return the last extracted root.
'''
class HeapNode(object):
    """One min-heap entry: a matrix value plus its (row, column) of origin."""

    def __init__(self, val, rn, cn):
        self.val = val
        # Row/column the value came from, so the heap can fetch the next
        # element from the same column after this one is consumed.
        self.rn = rn
        self.cn = cn
class MinHeap(object):
    ''' Min Heap '''

    # NOTE(review): this file targets Python 2 (the driver below uses print
    # statements); the integer ``/`` divisions here rely on py2 floor
    # division and would produce floats under Python 3.

    def __init__(self):
        self.heap = []
        self.size = 0

    def _parent(self, idx):
        # Parent index in the array-backed heap, clamped to the root.
        parent = (idx - 1) / 2
        if parent <= 0:
            return 0
        return parent

    def _swap(self, idx1, idx2):
        self.heap[idx1], self.heap[idx2] = self.heap[idx2], self.heap[idx1]

    def insert(self, val, rn, cn):
        # Append the new node, then sift it up until the min-heap property
        # (parent <= child) is restored.
        newNode = HeapNode(val, rn, cn)
        self.heap.append(newNode)
        self.size += 1
        if self.size == 1:
            return
        current = self.size - 1
        while self.heap[current].val < self.heap[self._parent(current)].val:
            self._swap(current, self._parent(current))
            current = self._parent(current)

    def peek(self):
        # Smallest element (the root); assumes a non-empty heap.
        return self.heap[0]

    def _is_leaf(self, pos):
        # Leaves occupy the second half of the backing array.
        if pos > ((self.size - 1) / 2) and pos <= self.size - 1:
            return True
        return False

    def _left_child(self, pos):
        # Left-child index, or -1 when out of range.
        left = 2 * pos + 1
        if left <= self.size - 1:
            return left
        return -1

    def _right_child(self, pos):
        # Right-child index, or -1 when out of range.
        right = 2 * pos + 2
        if right <= self.size - 1:
            return right
        return -1

    def _heapify(self, pos):
        # Sift the node at pos down: swap with the smaller child while the
        # min-heap property is violated.
        if self._is_leaf(pos):
            return
        left = self._left_child(pos)
        right = self._right_child(pos)
        if left != -1 and right != -1:
            if self.heap[pos].val > self.heap[left].val or\
               self.heap[pos].val > self.heap[right].val:
                if self.heap[left].val < self.heap[right].val:
                    self._swap(pos, left)
                    self._heapify(left)
                else:
                    self._swap(pos, right)
                    self._heapify(right)
        elif left != -1:
            # Only a left child exists.
            if self.heap[pos].val > self.heap[left].val:
                self._swap(pos, left)
                self._heapify(left)

    def replace(self, val, rn, cn):
        # Overwrite the root with a new node and sift it down; used in place
        # of a pop followed by a push.
        newNode = HeapNode(val, rn, cn)
        self.heap[0] = newNode
        self._heapify(0)
def find_kth_smallest(arr, k):
# Insert first row in MinHeap
minHeap = MinHeap()
for cn, val in enumerate(arr[0]):
minHeap.insert(val, 0, cn) # rn is 0 as it's the first row
# Now we need to check the root value of min heap.
# We replace the value of the min heap with the next value in the same
# column as that of the root node.
# We repeat this k times
for _ in range(k):
root = minHeap.peek()
rn = root.rn + 1
cn = root.cn
# IF THE VALUE STORED AS THE ROOT IS THE LAST VALUE IN IT'S COLUMN
# THEN ASSIGN "INFINITE" AS NEXT VALUE
try:
minHeap.replace(arr[rn][cn], rn, cn)
except IndexError:
minHeap.replace(2**32, rn, cn)
for node in minHeap.heap:
print node.val, node.rn, node.cn
print root.val
# Demo: a 4x4 matrix sorted along both its rows and its columns.
arr = [
    [10, 20, 30, 40],
    [15, 25, 35, 45],
    [24, 29, 37, 48],
    [32, 33, 39, 50]
]
# Print the 15th smallest element (the function also prints its heap trace).
find_kth_smallest(arr, 15)
| [
"GhoshSaurav@JohnDeere.com"
] | GhoshSaurav@JohnDeere.com |
99a772ef56a0045b29c6d562794d22d2f7a8bfef | b0ea541c0aef0fa8946aef3130490dc4fa068e9b | /ABC_PS1/catkin_ws/build/learning_ros_noetic/Part_4/mobot_mapping/catkin_generated/pkg.installspace.context.pc.py | ed8cd615c7530093768d6061c2a7484ac5d64dde | [] | no_license | ABCaps35/ECSE473_ABC | b66c8288412a34c72c858e16fd2f93540291b8ff | f03b9ec90317dd730aa723cb7fa7254ea03e412f | refs/heads/master | 2023-03-09T09:46:47.963268 | 2021-02-11T03:44:19 | 2021-02-11T03:44:19 | 337,913,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "mobot_mapping"
PROJECT_SPACE_DIR = "/home/abcaps35/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"acapelli345@gmail.com"
] | acapelli345@gmail.com |
83b3ede674e43d3ec88b0c8e25d143815f963c05 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_slaps.py | c2b00dc1a180251eb620011c2de56eb92b11daf6 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
#calss header
class _SLAPS():
def __init__(self,):
self.name = "SLAPS"
self.definitions = slap
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['slap']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
edc0b0666f4d7c9b685ef4a501def9c7fb1478b4 | 253089ef4ee99c50cdaa23fde4d789794789e2e9 | /97/holidays.py | 74ab4707382349d1194683c15ef69f436f20dcc0 | [] | no_license | Zaubeerer/bitesofpy | 194b61c5be79c528cce3c14b9e2c5c4c37059259 | e5647a8a7a28a212cf822abfb3a8936763cd6b81 | refs/heads/master | 2021-01-01T15:01:21.088411 | 2020-11-08T19:56:30 | 2020-11-08T19:56:30 | 239,328,990 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | from collections import defaultdict
import os
from urllib.request import urlretrieve
from bs4 import BeautifulSoup
import re
from datetime import datetime
# prep data
tmp = os.getenv("TMP", "/tmp")
page = 'us_holidays.html'
holidays_page = os.path.join(tmp, page)
urlretrieve(
f'https://bites-data.s3.us-east-2.amazonaws.com/{page}',
holidays_page
)
with open(holidays_page) as f:
content = f.read()
holidays = defaultdict(list)
def get_us_bank_holidays(content=content):
"""Receive scraped html output, make a BS object, parse the bank
holiday table (css class = list-table), and return a dict of
keys -> months and values -> list of bank holidays"""
holiday_dict = defaultdict(list)
soup = BeautifulSoup(content, "html.parser")
table = soup.find("table", class_ = "list-table")
rows = table.findAll('tr')
for tr in rows[1:]:
cols = tr.findAll('td')
month = cols[1].findAll(text=True)[1][5:7]
name = cols[3].findAll(text=True)[1].strip()
holiday_dict[month].append(name)
return holiday_dict | [
"r.beer@outlook.de"
] | r.beer@outlook.de |
d153b13c505232c9e7cad79ccf9c2e66cb7852b9 | 6a819308924a005aa66475515bd14586b97296ae | /venv/lib/python3.6/site-packages/PIL/ImagePalette.py | f33722f5ac2d67b2c4d3fefb58007d195c3253e7 | [] | no_license | AlexandrTyurikov/my_first_Django_project | a2c655dc295d3904c7688b8f36439ae8229d23d1 | 1a8e4d033c0ff6b1339d78c329f8beca058b019a | refs/heads/master | 2020-05-04T13:20:20.100479 | 2019-05-04T23:41:39 | 2019-05-04T23:41:39 | 179,156,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,320 | py | #
# The Python Imaging Library.
# $Id$
#
# images palette object
#
# History:
# 1996-03-11 fl Rewritten.
# 1997-01-03 fl Up and running.
# 1997-08-23 fl Added load hack
# 2001-04-16 fl Fixed randint shadow bug in random()
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1996-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import array
from . import ImageColor, GimpPaletteFile, GimpGradientFile, PaletteFile
class ImagePalette(object):
    """
    Color palette for palette mapped images

    :param mode: The mode to use for the Palette. See:
        :ref:`concept-modes`. Defaults to "RGB"
    :param palette: An optional palette. If given, it must be a bytearray,
        an array or a list of ints between 0-255 and of length ``size``
        times the number of colors in ``mode``. The list must be aligned
        by channel (All R values must be contiguous in the list before G
        and B values.) Defaults to 0 through 255 per channel.
    :param size: An optional palette size. If given, it cannot be equal to
        or greater than 256. Defaults to 0.
    """

    def __init__(self, mode="RGB", palette=None, size=0):
        self.mode = mode
        self.rawmode = None  # if set, palette contains raw data
        # NOTE: an empty (falsy) palette argument silently falls back to the
        # default identity ramp because of the ``or``.
        self.palette = palette or bytearray(range(256))*len(self.mode)
        self.colors = {}  # allocated (r, g, b) -> index mapping for getcolor()
        self.dirty = None
        # A palette must either exactly fill mode-channels * 256 entries
        # (size == 0) or match the explicitly requested size.
        if ((size == 0 and len(self.mode)*256 != len(self.palette)) or
                (size != 0 and size != len(self.palette))):
            raise ValueError("wrong palette size")

    def copy(self):
        """Return an independent copy of this palette."""
        new = ImagePalette()

        new.mode = self.mode
        new.rawmode = self.rawmode
        if self.palette is not None:
            new.palette = self.palette[:]
        new.colors = self.colors.copy()
        new.dirty = self.dirty

        return new

    def getdata(self):
        """
        Get palette contents in format suitable for the low-level
        ``im.putpalette`` primitive.

        .. warning:: This method is experimental.
        """
        if self.rawmode:
            return self.rawmode, self.palette
        # ";L" marks the data as channel-aligned (all R, then G, then B).
        return self.mode + ";L", self.tobytes()

    def tobytes(self):
        """Convert palette to bytes.

        .. warning:: This method is experimental.
        """
        if self.rawmode:
            raise ValueError("palette contains raw palette data")
        if isinstance(self.palette, bytes):
            return self.palette
        arr = array.array("B", self.palette)
        # tostring() fallback supports very old array implementations.
        if hasattr(arr, 'tobytes'):
            return arr.tobytes()
        return arr.tostring()

    # Declare tostring as an alias for tobytes
    tostring = tobytes

    def getcolor(self, color):
        """Given an rgb tuple, allocate palette entry.

        .. warning:: This method is experimental.
        """
        if self.rawmode:
            raise ValueError("palette contains raw palette data")
        if isinstance(color, tuple):
            try:
                return self.colors[color]
            except KeyError:
                # allocate new color slot
                if isinstance(self.palette, bytes):
                    self.palette = bytearray(self.palette)
                index = len(self.colors)
                if index >= 256:
                    raise ValueError("cannot allocate more than 256 colors")
                self.colors[color] = index
                # Channel-aligned layout: R at index, G at +256, B at +512.
                self.palette[index] = color[0]
                self.palette[index+256] = color[1]
                self.palette[index+512] = color[2]
                self.dirty = 1
                return index
        else:
            raise ValueError("unknown color specifier: %r" % color)

    def save(self, fp):
        """Save palette to text file.

        .. warning:: This method is experimental.
        """
        if self.rawmode:
            raise ValueError("palette contains raw palette data")
        if isinstance(fp, str):
            fp = open(fp, "w")
        fp.write("# Palette\n")
        fp.write("# Mode: %s\n" % self.mode)
        for i in range(256):
            fp.write("%d" % i)
            for j in range(i*len(self.mode), (i+1)*len(self.mode)):
                try:
                    fp.write(" %d" % self.palette[j])
                except IndexError:
                    # Short palettes are padded with zeros in the dump.
                    fp.write(" 0")
            fp.write("\n")
        fp.close()
# --------------------------------------------------------------------
# Internal
def raw(rawmode, data):
    """Wrap raw palette *data* (interpreted per *rawmode*) in a dirty ImagePalette."""
    wrapped = ImagePalette()
    wrapped.rawmode = rawmode
    wrapped.palette = data
    wrapped.dirty = 1
    return wrapped
# --------------------------------------------------------------------
# Factories
def make_linear_lut(black, white):
    """Build a 256-entry lookup table mapping input 0..255 linearly onto
    ``black``..``white``.

    The original implementation only handled ``black == 0`` and raised
    ``NotImplementedError`` otherwise (a long-standing FIXME).  The general
    linear interpolation below reduces to ``white * i // 255`` when
    ``black`` is 0, so existing callers see identical results.
    """
    return [black + (white - black) * i // 255 for i in range(256)]
def make_gamma_lut(exp):
    """Build a 256-entry gamma lookup table for exponent *exp* (rounded to int)."""
    return [int(((i / 255.0) ** exp) * 255.0 + 0.5) for i in range(256)]
def negative(mode="RGB"):
    """Palette mapping every channel value v to 255 - v."""
    ramp = list(range(255, -1, -1))
    return ImagePalette(mode, ramp * len(mode))
def random(mode="RGB"):
    """Palette filled with uniformly random channel values.

    (This function intentionally shadows the stdlib ``random`` name, so the
    module is imported lazily inside the body.)
    """
    from random import randint
    values = [randint(0, 255) for _ in range(256 * len(mode))]
    return ImagePalette(mode, values)
def sepia(white="#fff0c0"):
    """Sepia-tone RGB palette ramping each channel from black up to *white*."""
    rgb = ImageColor.getrgb(white)
    ramps = [make_linear_lut(0, channel) for channel in rgb]
    return ImagePalette("RGB", ramps[0] + ramps[1] + ramps[2])
def wedge(mode="RGB"):
    """Identity ("wedge") palette: value v maps to v on every channel."""
    ramp = list(range(256))
    return ImagePalette(mode, ramp * len(mode))
def load(filename):
    """Load a palette from *filename*, trying each known palette file format.

    Returns the (data, rawmode) pair produced by the first handler that
    parses the file; raises IOError when none of them succeed.
    """
    # FIXME: supports GIMP gradients only
    with open(filename, "rb") as fp:

        for paletteHandler in [
            GimpPaletteFile.GimpPaletteFile,
            GimpGradientFile.GimpGradientFile,
            PaletteFile.PaletteFile
        ]:
            try:
                # Each handler consumes the stream, so rewind before each try.
                fp.seek(0)
                lut = paletteHandler(fp).getpalette()
                if lut:
                    break
            except (SyntaxError, ValueError):
                # import traceback
                # traceback.print_exc()
                pass
        else:
            # for/else: no handler recognised the file.
            raise IOError("cannot load palette")

    return lut  # data, rawmode
| [
"tyur.sh@gmail.com"
] | tyur.sh@gmail.com |
ae482f5f801c9fc7d714c8b1c1d136d4a5ea6ea7 | ba095b34fb62cff6f5f6f32dc7036f13b45681a2 | /llia/synths/algo/algo_constants.py | 95a9f6e36228d341519f6ef51d8d46c1e077f12a | [] | no_license | plewto/Llia | 7d3c60bd7355d02e9b00e97c82f24da5fa83b0f4 | 97f530ff0841b9604f0d9575e7e1f0e3c0660be0 | refs/heads/master | 2020-05-21T20:39:07.223990 | 2018-04-30T02:28:55 | 2018-04-30T02:28:55 | 63,315,753 | 17 | 2 | null | 2016-08-04T17:10:17 | 2016-07-14T08:05:33 | Python | UTF-8 | Python | false | false | 1,968 | py | # llia.synths.algo.algo_constants
# Shared constants for the Algo synth: GUI colours, modulation ranges,
# key scaling, LFO frequency ratios, key breakpoints, envelope limits,
# the harmonic-ratio pool and envelope time/contour identifiers.

CFILL = "black"
CFOREGROUND = "white"
COUTLINE = "white"

MOD_RANGE_COUNT = 6
KEYSCALES = (-18, -12, -9, -6, -3, 0, 3, 6, 9, 12, 18)

# (ratio, display label) pairs for the LFO frequency ratio chooser.
LFO_RATIOS = ((0.125, "1/8"),
              (0.250, "1/4"),
              (0.375, "3/8"),
              (0.500, "1/2"),
              (0.625, "5/8"),
              (0.750, "3/4"),
              (0.875, "7/8"),
              (1.000, "1"),
              (1.250, "1 1/4"),
              (4/3.0, "1 1/3"),
              (1.500, "1 1/2"),
              (5/3.0, "1 2/3"),
              (1.750, "1 3/4"),
              (2.000, "2"),
              (2.500, "2 1/2"),
              (3.000, "3"),
              (4.000, "4"),
              (5.000, "5"),
              (6.000, "6"),
              (8.000, "8"),
              (9.000, "9"),
              (12.00, "12"),
              (16.00, "16"))

# Key breakpoints: the sorted union of two 12-semitone grids offset by 6,
# i.e. every 6th MIDI key from 0 up to 127.
_a = range(0, 128, 12)
_b = range(6, 128, 12)
# list() makes the concatenation valid on Python 3 as well; ``range`` objects
# cannot be added with ``+`` there (the original was Python-2 only).
_c = sorted(list(_a) + list(_b))
KEY_BREAKPOINTS = tuple(_c)

MAX_ENV_SEGMENT = 12

# Weighted pool of harmonic frequency ratios: each (count, ratio) pair adds
# ``count`` copies of ``ratio``, biasing random selection toward low harmonics.
HARMONICS = []
for n, f in ((1, 0.25),
             (8, 0.50),
             (3, 0.75),
             (24, 1.00),
             (3, 1.333),
             (8, 1.5),
             (24, 2.0),
             (18, 3.0),
             (12, 4.0),
             (7, 5.0),
             (9, 6.0),
             (1, 7.0),
             (6, 8.0),
             (4, 9.0),
             (2, 10.0),
             (2, 12.0),
             (1, 16.0)):
    for i in range(n):
        HARMONICS.append(f)

# Envelope times
#
ULTRA_FAST = 1
FAST = 2
MEDIUM = 3
SLOW = 4
GLACIAL = 5
FULL = 6
ENV_TIME_NAMES = {ULTRA_FAST: "Ultra-fast",  # (0.00, 0.01)
                  FAST: "Fast",              # (0.00, 0.10)
                  MEDIUM: "Medium",          # (0.10, 1.00)
                  SLOW: "Slow",              # (1.00, 4.00)
                  GLACIAL: "Glacial",        # (4.00, 12.0)
                  FULL: "Full",              # (0.00, 12.0)
                  None: ""}

# Envelope contours
#
GATE = 1
PERCUSSIVE = 2
ASR = 3
ADSR = 4
| [
"plewto@gmail.com"
] | plewto@gmail.com |
f51d6f03e2249ff68e86a5c1b53336e2988f0477 | 3f9511cdf1fc3dc76f1acda62be061f6442a1289 | /tests/sparkml/test_imputer.py | 9b238cf25b163506d3744f808f38d0d27c16a63e | [
"Apache-2.0"
] | permissive | xadupre/onnxmltools | e0aa5a2731c07a87cf0ec0f7b52507dc8c25e6cf | facefb245d991aa30c49bff7510a803997bc8137 | refs/heads/master | 2023-08-08T10:43:32.769022 | 2022-06-20T11:24:03 | 2022-06-20T11:24:03 | 331,380,871 | 0 | 0 | Apache-2.0 | 2021-01-20T17:30:45 | 2021-01-20T17:30:44 | null | UTF-8 | Python | false | false | 3,912 | py | # SPDX-License-Identifier: Apache-2.0
import sys
import unittest
import numpy
from pyspark.ml.feature import Imputer
from onnx.defs import onnx_opset_version
from onnxconverter_common.onnx_ex import DEFAULT_OPSET_NUMBER
from onnxmltools import convert_sparkml
from onnxmltools.convert.common.data_types import FloatTensorType
from tests.sparkml.sparkml_test_utils import save_data_models, run_onnx_model, compare_results
from tests.sparkml import SparkMlTestCase
# Cap the conversion opset at whichever is lower: the converter default or
# what the installed onnx package actually supports.
TARGET_OPSET = min(DEFAULT_OPSET_NUMBER, onnx_opset_version())
## For some reason during the spark bring up and shutdown something happens causing Imputer
## tests to fail. For that you need to run each test here individually
## for now these will be commented out so as not to break the build
## AttributeError: 'NoneType' object has no attribute 'setCallSite' on model.surrogateDF
## Therefore we leave these tests out for now until a newere version of pyspark is availabe that address this issue
class TestSparkmlImputer(SparkMlTestCase):
    # Per the module note above: pyspark setup/teardown issues mean these
    # tests may need to be run individually.

    @unittest.skipIf(sys.version_info < (3, 8),
                     reason="pickle fails on python 3.7")
    def test_imputer_single(self):
        self._imputer_test_single()

    @unittest.skipIf(True, reason="Name:'Split' Status Message: Cannot split using values in 'split")
    @unittest.skipIf(sys.version_info < (3, 8),
                     reason="pickle fails on python 3.7")
    def test_imputer_multi(self):
        self._imputer_test_multi()

    def _imputer_test_multi(self):
        # Two input columns, each with NaNs to be replaced by the column mean.
        data = self.spark.createDataFrame([
            (1.0, float("nan")),
            (2.0, float("nan")),
            (float("nan"), 3.0),
            (4.0, 4.0),
            (5.0, 5.0)
        ], ["a", "b"])
        imputer = Imputer(inputCols=["a", "b"], outputCols=["out_a", "out_b"])
        model = imputer.fit(data)

        # the input name should match the inputCols above
        model_onnx = convert_sparkml(model, 'Sparkml Imputer Multi Input', [
            ('a', FloatTensorType([None, 1])),
            ('b', FloatTensorType([None, 1]))], target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)

        # run the model: compare the ONNX output against Spark's own transform
        predicted = model.transform(data)
        expected = predicted.select("out_a", "out_b").toPandas().values.astype(numpy.float32)
        data_np = data.toPandas().values.astype(numpy.float32)
        # ONNX runtime expects one named (N, 1) array per declared input.
        data_np = {'a': data_np[:, :1], 'b': data_np[:, 1:]}
        paths = save_data_models(data_np, expected, model, model_onnx, basename="SparkmlImputerMulti")
        onnx_model_path = paths[-1]
        output, output_shapes = run_onnx_model(['out_a', 'out_b'], data_np, onnx_model_path)
        compare_results(expected, output, decimal=5)

    def _imputer_test_single(self):
        data = self.spark.createDataFrame([
            (1.0, float("nan")),
            (2.0, float("nan")),
            (float("nan"), 3.0),
            (4.0, 4.0),
            (5.0, 5.0)
        ], ["a", "b"])
        imputer = Imputer(inputCols=["a"], outputCols=["out_a"])
        model = imputer.fit(data)

        # the input name should match the inputCols above
        model_onnx = convert_sparkml(model, 'Sparkml Imputer', [
            ('a', FloatTensorType([None, 1]))], target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)

        # run the model: single column variant of the comparison above
        predicted = model.transform(data)
        expected = predicted.select("out_a").toPandas().values.astype(numpy.float32)
        data_np = data.toPandas().a.values.astype(numpy.float32)
        data_np = data_np.reshape((-1, 1))
        paths = save_data_models(data_np, expected, model, model_onnx, basename="SparkmlImputerSingle")
        onnx_model_path = paths[-1]
        output, output_shapes = run_onnx_model(['out_a'], data_np, onnx_model_path)
        compare_results(expected, output, decimal=5)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| [
"noreply@github.com"
] | xadupre.noreply@github.com |
61fd36e2270c1aa85f01ad2f827292a06b68e384 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_overs.py | ce1fa766d347ed234f486bbfb1bcc49794d6c8dd | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
from xai.brain.wordbase.nouns._over import _OVER
#calss header
class _OVERS(_OVER, ):
    # Plural word entry: inherits behaviour from the singular _OVER and only
    # overrides the identifying metadata fields.
    def __init__(self,):
        _OVER.__init__(self)
        self.name = "OVERS"       # display form of this entry
        self.specie = 'nouns'     # word category bucket
        self.basic = "over"       # base (singular) form
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8f4d202437faa4e43520033760111cb066a89fb0 | 49122876b08f17e2f6a2a1efe41f0e2fc3623db6 | /publication/migrations/0001_initial.py | 7dbdcb207aa0e5c90b8e0fcc58da4dedc0f0cf92 | [] | no_license | Ansagan-Kabdolla/vestnik | bb2010daa22155953501fc5405ac9cdd36c5b68c | 40155e92e91d5c56c9018f51e277e7c64c95c134 | refs/heads/master | 2022-04-26T09:21:34.656317 | 2020-04-29T10:44:44 | 2020-04-29T10:44:44 | 259,898,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,827 | py | # Generated by Django 2.2.4 on 2020-04-10 11:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; avoid hand-editing the
    # operations below.

    initial = True

    dependencies = [
    ]

    operations = [
        # Lookup table of subject series with an image and description.
        migrations.CreateModel(
            name='Predmeti',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='Название')),
                ('img_url', models.FileField(upload_to='pred_img', verbose_name='Фото')),
                ('description', models.TextField(verbose_name='Описание')),
                ('date', models.DateTimeField(auto_now_add=True, db_index=True)),
            ],
            options={
                'verbose_name': 'Предмет',
                'verbose_name_plural': 'Предметы',
                'ordering': ['date'],
            },
        ),
        # Publication PDF linked (nullable) to a Predmeti series.
        migrations.CreateModel(
            name='Filepdf',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=200, verbose_name='Авторы')),
                ('file', models.FileField(upload_to='', verbose_name='Файл')),
                ('date', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('serius', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='publication.Predmeti', verbose_name='Серия')),
            ],
            options={
                'verbose_name': 'Публикация',
                'verbose_name_plural': 'Публикации',
                'ordering': ['date'],
            },
        ),
    ]
| [
"ansagankabdolla4@gmail.com"
] | ansagankabdolla4@gmail.com |
fe0dee7118ce7fdfd87aaf3117f961056f616985 | bf0e884ed3c9b57d0bc022c45b4bd50f7f5ba34a | /tomheon/day19/day19-1.py | 33e533ec6d1b5d1deebfdbd7cbb3db6ce9861bea | [
"MIT"
] | permissive | sean-hart/advent2020 | 8db117f3e778ec8044e97ce8a0d17edeb6351415 | 1174afcedf9a8db5134803869e63ea182637fc29 | refs/heads/main | 2023-02-07T15:52:57.956987 | 2020-12-27T00:42:55 | 2020-12-27T00:42:55 | 317,643,649 | 0 | 8 | MIT | 2020-12-27T00:42:56 | 2020-12-01T19:17:17 | Python | UTF-8 | Python | false | false | 2,481 | py | import sys
from itertools import takewhile, dropwhile
def make_atom_checker(rule_text):
    """Build a checker for a quoted literal rule such as '"a"'.

    The returned function gives (True, rest-of-message) on a prefix match,
    otherwise (False, original message).
    """
    literal = rule_text.strip('"')

    def _check_atom(message):
        if message.startswith(literal):
            return True, message[len(literal):]
        return False, message

    return _check_atom
def make_concat_checker(checkers, rule_text):
    """Build a checker that matches each numbered sub-rule in sequence.

    Sub-rule checkers are looked up in *checkers* lazily at match time, so
    forward references to not-yet-registered rules are fine.
    """
    rule_ids = [int(token) for token in rule_text.split()]

    def _check_concat(message):
        rest = message
        for rule_id in rule_ids:
            ok, rest = checkers[rule_id](rest)
            if not ok:
                return False, message
        return True, rest

    return _check_concat
def make_optional_checker(checkers, rule_text):
    """Build a checker that accepts any one '|'-separated alternative."""
    alternatives = [make_concat_checker(checkers, alt)
                    for alt in rule_text.split('|')]

    def _check_optional(message):
        for alternative in alternatives:
            ok, rest = alternative(message)
            if ok:
                return True, rest
        return False, message

    return _check_optional
def is_atom_rule(rule_text):
    """True when the rule body is a quoted literal, e.g. '"a"'."""
    return rule_text[:1] == '"'
def is_concat_rule(rule_text):
    """True when the rule body is a plain sequence of rule numbers."""
    return '"' not in rule_text and '|' not in rule_text
def is_optional_rule(rule_text):
    """True when the rule body contains '|'-separated alternatives."""
    return rule_text.find('|') >= 0
def make_rules_checker(rules):
    """Compile rule lines ("N: body") into a predicate for full matches.

    A body is either a quoted literal, a sequence of rule numbers, or
    '|'-separated alternatives.  All checkers share the ``checkers`` dict,
    so forward references resolve lazily at match time.  The returned
    predicate is True only when rule 0 consumes the entire message.
    """
    checkers = dict()
    for rule in rules:
        rule_no, rule_text = rule.split(":")
        rule_no = int(rule_no)
        rule_text = rule_text.strip()
        checker = None
        if is_atom_rule(rule_text):
            checker = make_atom_checker(rule_text)
        elif is_concat_rule(rule_text):
            checker = make_concat_checker(checkers, rule_text)
        elif is_optional_rule(rule_text):
            checker = make_optional_checker(checkers, rule_text)
        else:
            # The original raised the undefined name ``Error`` here, which
            # would surface as a NameError; raise a real exception type.
            raise ValueError(f"Couldn't create checker for {rule_no} {rule_text}")
        checkers[rule_no] = checker

    def _rules_checker(message):
        matched, remaining = checkers[0](message)
        return matched and not remaining

    return _rules_checker
def main():
    """Count stdin messages matched by rule 0 (Advent of Code 2020 day 19)."""
    # The rules come first, separated from the messages by a blank line.
    rules = [line.strip() for line in takewhile(lambda l: l.strip(), sys.stdin)]
    checker = make_rules_checker(rules)
    messages = [line.strip() for line in dropwhile(lambda l: not l.strip(), sys.stdin)]
    print(len([m for m in messages if checker(m)]))
# Run only when executed as a script.
if __name__ == '__main__':
    main()
| [
"tomheon@gmail.com"
] | tomheon@gmail.com |
6a74b019629064bc3870806038bd746ab965c5b1 | ad69290bc5210424259ac0481aff95896ad92433 | /dalet/addresses.py | 607c611a12803171dbcf2d2f7afa368974f9870d | [
"MIT"
] | permissive | reuf/dalet | 9ade431ffb49e0db01d98553be3afd653b9e2a5c | 3af0c266cdd9b390da9c2a828d5b0cde1ee2b8b8 | refs/heads/master | 2021-06-18T17:26:53.656073 | 2017-05-28T18:23:38 | 2017-05-28T18:23:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | import re
import countrynames
from normality import stringify
from normality.cleaning import remove_control_chars, collapse_spaces
# Separators to turn into ", ": line breaks, <BR/>, tabs, "ESQ." suffixes, ";".
LINE_BREAKS = re.compile(r'(\r\n|\n|<BR/>|\t|ESQ\.,|ESQ,|;)')
# Routing boilerplate tokens that are stripped entirely.
REMOVE = re.compile(r'(ATTENTION|ATTN|C/O|UNDELIVERABLE DOMESTIC ADDRESS)')
# Doubled-up comma/period runs left behind by the substitutions above.
COMMATA = re.compile(r'(,\s?[,\.])')
def clean_address(address):
    """Normalise a free-text address string.

    Returns the upper-cased, whitespace-collapsed address, or None when the
    input is empty or consists only of a country name/code.
    """
    address = stringify(address)
    if address is None:
        return
    address = address.upper()
    address = LINE_BREAKS.sub(', ', address)
    address = REMOVE.sub(' ', address)
    address = COMMATA.sub(', ', address)
    address = remove_control_chars(address)
    address = collapse_spaces(address)
    # return none if this is just a country code or name:
    code = countrynames.to_code(address, fuzzy=False)
    if code is not None:
        return
    return address
| [
"friedrich@pudo.org"
] | friedrich@pudo.org |
c85d7cd9365249c757dcb0502bd5334fa989d9f7 | 1121c346e6ef7e3e59f0b372424d9c78c3ecebf1 | /repository.py | 2d7a9d5a0cd0e76afa5058197f6a30c3168965dd | [] | no_license | xueyuanl/pyt | 321c8b1112ad9ee20bb8362fff13e598f300e8b4 | 3d1ca47b16ebb072ac4564a450934386e92852f2 | refs/heads/master | 2021-01-01T10:43:28.267669 | 2020-02-09T04:07:37 | 2020-02-09T04:07:37 | 239,243,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,706 | py | import argparse
import collections
import configparser
import hashlib
import os
import re
import sys
import zlib
class GitRepository(object):
    """A git repository"""

    worktree = None  # path to the working directory
    gitdir = None    # path to the .git directory inside the worktree
    conf = None      # parsed .git/config

    def __init__(self, path, force=False):
        # *force* skips all validation, allowing repo_create() to bootstrap
        # a directory that is not yet a valid repository.
        self.worktree = path
        self.gitdir = os.path.join(path, ".git")

        if not (force or os.path.isdir(self.gitdir)):
            raise Exception("Not a Git repository %s" % path)

        # Read configuration file in .git/config
        self.conf = configparser.ConfigParser()
        cf = repo_file(self, "config")

        if cf and os.path.exists(cf):
            self.conf.read([cf])
        elif not force:
            raise Exception("Configuration file missing")

        if not force:
            # Only format version 0 is understood.
            vers = int(self.conf.get("core", "repositoryformatversion"))
            if vers != 0:
                raise Exception("Unsupported repositoryformatversion %s" % vers)
def repo_path(repo, *path):
    """Compute path under repo's gitdir."""
    segments = (repo.gitdir,) + path
    return os.path.join(*segments)
def repo_file(repo, *path, mkdir=False):
    """Same as repo_path, but create dirname(*path) if absent. For
    example, repo_file(r, \"refs\", \"remotes\", \"origin\", \"HEAD\") will create
    .git/refs/remotes/origin."""
    # Ensure the parent directory chain exists (repo_dir gets all but the
    # final path component); returns None when it is missing and mkdir=False.
    if repo_dir(repo, *path[:-1], mkdir=mkdir):
        return repo_path(repo, *path)
def repo_dir(repo, *path, mkdir=False):
    """Same as repo_path, but mkdir *path if absent if mkdir."""
    path = repo_path(repo, *path)

    if os.path.exists(path):
        if (os.path.isdir(path)):
            return path
        else:
            # The path exists but is a file: that's a corrupt layout.
            raise Exception("Not a directory %s" % path)

    if mkdir:
        os.makedirs(path)
        return path
    else:
        # Missing and not allowed to create it.
        return None
def repo_create(path):
    """Create a new repository at path."""
    # force=True: the directory is not a valid repository yet.
    repo = GitRepository(path, True)

    # First, we make sure the path either doesn't exist or is an
    # empty dir.
    if os.path.exists(repo.worktree):
        if not os.path.isdir(repo.worktree):
            raise Exception("%s is not a directory!" % path)
        if os.listdir(repo.worktree):
            raise Exception("%s is not empty!" % path)
    else:
        os.makedirs(repo.worktree)

    # Standard .git skeleton.
    assert (repo_dir(repo, "branches", mkdir=True))
    assert (repo_dir(repo, "objects", mkdir=True))
    assert (repo_dir(repo, "refs", "tags", mkdir=True))
    assert (repo_dir(repo, "refs", "heads", mkdir=True))

    # .git/description
    with open(repo_file(repo, "description"), "w") as f:
        f.write("Unnamed repository; edit this file 'description' to name the repository.\n")

    # .git/HEAD
    with open(repo_file(repo, "HEAD"), "w") as f:
        f.write("ref: refs/heads/master\n")

    with open(repo_file(repo, "config"), "w") as f:
        config = repo_default_config()
        config.write(f)

    return repo
def repo_default_config():
    """Build the default contents of .git/config for a fresh repository."""
    config = configparser.ConfigParser()
    config["core"] = {
        "repositoryformatversion": "0",
        "filemode": "false",
        "bare": "false",
    }
    return config
def repo_find(path=".", required=True):
    """Walk upward from *path* to locate the enclosing git repository.

    Returns a GitRepository, or None when none is found and required=False.
    """
    path = os.path.realpath(path)

    if os.path.isdir(os.path.join(path, ".git")):
        return GitRepository(path)

    # If we haven't returned, recurse in parent, if w
    parent = os.path.realpath(os.path.join(path, ".."))

    if parent == path:
        # Bottom case
        # os.path.join("/", "..") == "/":
        # If parent==path, then path is root.
        if required:
            raise Exception("No git directory.")
        else:
            return None

    # Recursive case
    return repo_find(parent, required)
| [
"15186846+xueyuanl@users.noreply.github.com"
] | 15186846+xueyuanl@users.noreply.github.com |
3b6881e7df189cf51aa028d4693e8f04399096ab | 582ffc028085cacb1d69315889e611fb31a23f98 | /ch5-blog-app/blog/migrations/0001_initial.py | 3567050eba1387160f830e3b68f3f643499ea4b8 | [
"MIT"
] | permissive | balazskiss1985/djangoforbeginners | af04e0d441414e777b952325fdf62339e1b4c2c8 | 827b1b11592e851a6c4948d849ae8815f9c138c7 | refs/heads/master | 2022-12-05T13:48:01.648379 | 2020-08-24T14:09:02 | 2020-08-24T14:09:02 | 289,945,354 | 0 | 0 | MIT | 2020-08-24T14:07:22 | 2020-08-24T14:07:21 | null | UTF-8 | Python | false | false | 786 | py | # Generated by Django 3.1rc1 on 2020-07-22 17:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; avoid hand-editing the
    # operations below.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Blog post with an author FK to the (swappable) user model.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('body', models.TextField()),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"will@wsvincent.com"
] | will@wsvincent.com |
2cf275cca9a55fcce0e3b89acf707b082409bf94 | 7fa06a5089a9b5a10553d457501abbaa0a7f1112 | /opencv/pro4_Detect_face_and_eyes/face_and_eye_detection.py | de47c4a64a40009cd07e1c4b0b8e6587cafcf91f | [] | no_license | dbetm/processing-images | 15e0687b8688328c98af2979b36e7ebd595141ef | 53dcf5431d47cf19d84c086e61a99df9a35c69fe | refs/heads/master | 2020-04-18T00:18:23.077066 | 2019-11-27T05:14:12 | 2019-11-27T05:14:12 | 167,071,638 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | import numpy as np
import cv2
# Cargar el clasificador en cascada
face_classifier = cv2.CascadeClassifier("../Haarcascades/haarcascade_frontalface_default.xml")
eye_classifier = cv2.CascadeClassifier("../Haarcascades/haarcascade_eye.xml")
# Cargamos la imagen y la convertimos
# a escala de grises
img = cv2.imread("obama.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray, 1.3, 5)
# When no faces detected, face_classifier returns and empty tuple
if faces is ():
print("No Face Found")
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(127,0,255),2)
cv2.imshow('img',img)
cv2.waitKey(0)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_classifier.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(255,255,0),2)
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"davbetm@gmail.com"
] | davbetm@gmail.com |
f96e024db9c5b10be18599484feec775f26283c2 | 1f8464d34c5fec12449133ebd7c18bc8629b1e18 | /infer.py | 4d5dac2a62e68c15dff0ead583c74d8c94d23dd4 | [] | no_license | markflies777/retinanet-digit-detector | 4eff6f1591e5adfaac115aca2c2a12b5d7735f6c | 6aadef08bfc29297479dce182ca2d4b553eddea7 | refs/heads/master | 2022-01-13T02:17:31.861004 | 2019-05-13T12:15:24 | 2019-05-13T12:15:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | # -*- coding: utf-8 -*-
from keras_retinanet import models
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
import time
from retina.utils import visualize_boxes
# Trained keras-retinanet weights and the sample image used by the demo below.
MODEL_PATH = 'snapshots/resnet50_full.h5'
IMAGE_PATH = 'samples/JPEGImages/1.png'
def load_inference_model(model_path=os.path.join('snapshots', 'resnet.h5')):
    """Load a keras-retinanet training model and convert it for inference.

    convert_model() adds the box-decoding/NMS layers needed at prediction
    time; the model summary is printed as a side effect.
    """
    model = models.load_model(model_path, backbone_name='resnet50')
    model = models.convert_model(model)
    model.summary()
    return model
def post_process(boxes, original_img, preprocessed_img):
    """Rescale predicted boxes from network-input coordinates back to the
    original image, in place; returns the same ``boxes`` array.

    Boxes are laid out as (batch, box, [x1, y1, x2, y2]).
    """
    in_h, in_w, _ = preprocessed_img.shape
    out_h, out_w, _ = original_img.shape
    # x-coordinates scale with width, y-coordinates with height.
    for channel in (0, 2):
        boxes[:, :, channel] = boxes[:, :, channel] / in_w * out_w
    for channel in (1, 3):
        boxes[:, :, channel] = boxes[:, :, channel] / in_h * out_h
    return boxes
if __name__ == '__main__':
    model = load_inference_model(MODEL_PATH)

    # load image
    image = read_image_bgr(IMAGE_PATH)

    # copy to draw on (RGB for matplotlib display)
    draw = image.copy()
    draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)

    # preprocess image for network
    image = preprocess_image(image)
    image, _ = resize_image(image, 416, 448)

    # process image
    start = time.time()
    boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))
    print("processing time: ", time.time() - start)

    # map box coordinates back to the original (draw) image
    boxes = post_process(boxes, draw, image)

    # drop the batch dimension
    labels = labels[0]
    scores = scores[0]
    boxes = boxes[0]
    visualize_boxes(draw, boxes, labels, scores, class_labels=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])

    # 5. plot
    plt.imshow(draw)
    plt.show()
| [
"penny4860@gmail.com"
] | penny4860@gmail.com |
8f756e3bb14502ea7e325811d0c6fd2120a152ac | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2673/60585/281192.py | 0974fa348779c9a871dc14eaae1e67868df5eecf | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | t=eval(input())
for _ in range(t):
n=bin(eval(input())).replace('0b','')
res=n[0]
for i in range(1,len(n)):
if res[-1]==n[i]:
res+='0'
else:
res+='1'
print(int(res,2))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
bf65e117900e20d5e6f3d1d8fa220ae79b2e3121 | c77b2f06a971d5e77a3dc71e972ef27fc85475a5 | /algo_ds/_general/merge_sort.py | 64cb2a65de1663f9525ecdc465874a585bde22de | [] | no_license | thefr33radical/codeblue | f25520ea85110ed09b09ae38e7db92bab8285b2f | 86bf4a4ba693b1797564dca66b645487973dafa4 | refs/heads/master | 2022-08-01T19:05:09.486567 | 2022-07-18T22:56:05 | 2022-07-18T22:56:05 | 110,525,490 | 3 | 6 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 12 20:13:25 2017
@author: gowtham
"""
def sorter(arr, low, mid, high):
    """Merge the two sorted runs arr[low:mid+1] and arr[mid+1:high+1] in place.

    Bug fixed: the original built the merged run in a temporary list and then
    rebound the *local* name ``arr`` to it (``arr = temp``), so the caller's
    list was never modified and the merged data was silently discarded.
    Writing the result back through a slice assignment makes the merge
    visible to the caller.  Indices are cast to int because merge() may pass
    float midpoints on Python 3.
    """
    low, mid, high = int(low), int(mid), int(high)
    left = arr[low:mid + 1]
    right = arr[mid + 1:high + 1]
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One of these is empty; the other holds the sorted tail.
    merged.extend(left[i:])
    merged.extend(right[j:])
    arr[low:high + 1] = merged  # write back so the caller sees the merge
def merge(l, low, high):
    """Recursively merge-sort l[low:high+1] in place.

    Bug fixed: ``(low + high) / 2`` produces a float on Python 3, pushing
    float indices through the recursion; floor division keeps them ints.
    """
    low, high = int(low), int(high)
    if low < high:
        mid = (low + high) // 2  # integer midpoint
        merge(l, low, mid)
        merge(l, mid + 1, high)
        sorter(l, low, mid, high)
# NOTE(review): 'main' is never equal to __name__ (the standard guard is
# '__main__'), so the else-branch also runs on import -- confirm intent.
if __name__=='main':
    l=[34,343,54,5,555,85]
else:
    l=[34,343,54,5,555,85]
# NOTE(review): pre-sorting here means merge() below receives already-sorted
# data, masking whatever the merge implementation actually does.
l.sort()
merge(l,0,int(len(l)-1))
print (l)
"imperial.gauntlet@gmail.com"
] | imperial.gauntlet@gmail.com |
fc3350506279dd9c1c2a5b781c39c33bb77c568b | a6678062b0cd9f2477e9e25d03f7a83f91ce994e | /upk/apk.py | 378107a37b43996269a9fc7970cdbe772aa0c035 | [
"MIT"
] | permissive | Cologler/upk-python | d0e2068984254ffbe4f35512751d63be3ad522e9 | f20f4ff3167d7a5a089523154b0b8f47973ea311 | refs/heads/main | 2023-04-17T06:34:59.427532 | 2021-04-29T15:54:36 | 2021-04-29T15:54:36 | 314,580,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from typing import TypedDict, Optional
from logging import Logger
import zipfile
import xml.etree.ElementTree as et
from .androidManifestDecompress import read
class _PackageInfo(TypedDict):
    # Manifest-level metadata pulled from AndroidManifest.xml; either field
    # may be None when the corresponding attribute is absent.
    package: Optional[str]
    version: Optional[str]
def read_package_info(path: str, logger: Logger) -> Optional[_PackageInfo]:
    'read package info from *.apk file.'
    # Returns None when the manifest cannot be decoded (a warning is logged).
    with zipfile.ZipFile(path) as z:
        with z.open('AndroidManifest.xml') as am:
            try:
                decoded = read(am)
            except Exception:
                # A bare ``except:`` (as in the original) would also swallow
                # KeyboardInterrupt/SystemExit; only real decoding failures
                # should be treated as "skip this apk".
                logger.warning(f'unable decode manifest, skiped.')
            else:
                xml = et.fromstring(decoded)
                return dict(
                    package=xml.get('package'),
                    version=xml.get('versionName')
                )
| [
"skyoflw@gmail.com"
] | skyoflw@gmail.com |
38573492b46389b756279bc94787a0408c6ec72b | 2ccb99e0b35b58622c5a0be2a698ebda3ab29dec | /testing/web-platform/tests/XMLHttpRequest/resources/chunked.py | 7adabbfd7f471a7491508f613868300836ae74fc | [
"LicenseRef-scancode-w3c-03-bsd-license",
"BSD-3-Clause"
] | permissive | roytam1/palemoon27 | f436d4a3688fd14ea5423cbcaf16c4539b88781f | 685d46ffdaee14705ea40e7ac57c4c11e8f31cd0 | refs/heads/master | 2023-08-20T10:11:13.367377 | 2023-08-17T07:28:43 | 2023-08-17T07:28:43 | 142,234,965 | 61 | 16 | NOASSERTION | 2022-03-30T07:54:03 | 2018-07-25T02:10:02 | null | UTF-8 | Python | false | false | 666 | py | def main(request, response):
chunks = ["First chunk\r\n",
"Second chunk\r\n",
"Yet another (third) chunk\r\n",
"Yet another (fourth) chunk\r\n",
]
response.headers.set("Transfer-Encoding", "chunked");
response.headers.set("Trailer", "X-Test-Me");
response.headers.set("Content-Type", "text/plain");
response.write_status_headers()
for value in chunks:
response.writer.write("%d\r\n" % len(value))
response.writer.write(value)
response.writer.write("\r\n")
response.writer.write("0\r\n")
response.writer.write("X-Test-Me: Trailer header value\r\n\r\n")
| [
"roytam@gmail.com"
] | roytam@gmail.com |
6b3067f48101cec3d7d205b4a8a24c5bf2432457 | 05b7569b3999b3871fa1c72bdff172accfe7a48c | /nacao/PreProcess.py | d19da833d48f720edb7aa83198cd4b9293bef261 | [] | no_license | nanqianbeiquan/keras | d997cf2188ccb0e8e73143c26a7283ebd1275c42 | 576a32b4ccc75fc723a5f8662de1460a26b43822 | refs/heads/master | 2021-05-07T03:07:38.841726 | 2017-11-15T02:24:33 | 2017-11-15T02:24:33 | 109,337,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,051 | py | # -*- coding: utf-8 -*-
import numpy as np
import cv2
import os
import random
import time
class PreProcess(object):
def ConvertToGray(self,Image,filename):
GrayImage = cv2.cvtColor(Image,cv2.COLOR_BGR2GRAY)
return GrayImage
def ConvertToBpp(self,GrayImage,filename):
App,Bpp = cv2.threshold(GrayImage,130,255,cv2.THRESH_BINARY)
return Bpp
    def RemoveLine(self, Bpp, filename):
        """Despeckle pass: replace pixels with fewer than five matching
        8-neighbours by their left neighbour, then write the result to
        '1.jpg' (side effect). *filename* is unused."""
        m = 1  # NOTE(review): m and n are never used in this method
        n = 1
        near_dots = 0
        # NOTE(review): near_dots is initialised once and never reset per
        # pixel, so the counter accumulates across the whole scan and the
        # ``near_dots < 5`` test only fires for the first few pixels --
        # confirm whether a per-pixel reset was intended.
        for x in range(Bpp.shape[0]-1):
            for y in range(Bpp.shape[1]-1):
                pix = Bpp[x][y]
                # Count how many of the 8 neighbours share this pixel's value.
                if pix == Bpp[x-1][y-1]:
                    near_dots += 1
                if pix == Bpp[x-1][y]:
                    near_dots += 1
                if pix == Bpp[x-1][y+1]:
                    near_dots += 1
                if pix == Bpp[x][y-1]:
                    near_dots += 1
                if pix == Bpp[x][y+1]:
                    near_dots += 1
                if pix == Bpp[x+1][y-1]:
                    near_dots += 1
                if pix == Bpp[x+1][y]:
                    near_dots += 1
                if pix == Bpp[x+1][y+1]:
                    near_dots += 1
                if near_dots < 5:
                    # Isolated speck: copy the left neighbour over it.
                    Bpp[x][y] = Bpp[x][y-1]
        cv2.imwrite('1.jpg', Bpp)
        return Bpp
    def InterferLine(self, Bpp, filename):
        """Blank the margins and trace/erase the interference line between
        columns 50 and 170 of the binarised captcha. *filename* is unused."""
        # Clear the left margin (columns 0..49, every row).
        for i in range(50):
            for j in range(Bpp.shape[0]):
                Bpp[j][i] = 255
        # NOTE(review): here j iterates column indices (171..width) but is
        # used as the ROW subscript (Bpp[j][i]); compared with the loop above
        # the subscripts look transposed and could raise IndexError for wide
        # images -- confirm against the expected captcha geometry.
        for j in range(171, Bpp.shape[1]):
            for i in range(0, Bpp.shape[0]):
                Bpp[j][i] = 255
        m = 1
        n = 1
        # Track the dark interference line column by column, erasing the
        # pixel pair (m, n) found in each column.
        for i in range(50, 171):
            while (m < Bpp.shape[0]-1):
                if Bpp[m][i] == 0:
                    # Found a dark pixel: pick its partner row n.
                    if Bpp[m+1][i] == 0:
                        n = m+1
                    elif m > 0 and Bpp[m-1][i] == 0:
                        n = m
                        m = n-1
                    else:
                        n = m+1
                    break
                elif m != Bpp.shape[0]:
                    # Measure the dark run above m to decide the search
                    # direction for the next probe.
                    # NOTE(review): the ll- and kk-loops below compute the
                    # same upward count, so l and k are always equal --
                    # confirm whether one was meant to scan downward.
                    l = 0
                    k = 0
                    ll = m
                    kk = m
                    while (ll > 0):
                        if Bpp[ll][i] == 0:
                            ll = ll-1
                            l = l+1
                        else:
                            break
                    while (kk > 0):
                        if Bpp[kk][i] == 0:
                            kk = kk-1
                            k = k+1
                        else:
                            break
                    if (l <= k and l != 0) or (k == 0 and l != 0):
                        m = m-1
                    else:
                        m = m+1
                else:
                    break
            if m > 0 and Bpp[m-1][i] == 0 and Bpp[n-1][i] == 0:
                # The line overlaps a glyph stroke here: leave it alone.
                continue
            else:
                Bpp[m][i] = 255
                Bpp[n][i] = 255
        # cv2.imwrite(filename+'1.jpg', Bpp)
        return Bpp
    def CutImage(self, Bpp, filename):
        """Slice the captcha into five digit sub-images at fixed column
        ranges, save each to the temp folder under a timestamp+random name,
        and return the five arrays. *filename* is unused."""
        outpath = 'E:/python/keras/nacao/temp/'
        # Digit 1: columns 57..79 (23 px wide).
        b1 = np.zeros((Bpp.shape[0], 23))
        for i in range(57, 80):
            for j in range(0, Bpp.shape[0]):
                b1[j][i-57] = Bpp[j][i]
        cv2.imwrite(outpath+'%d' % (time.time()*1000)+str(random.randint(1000, 9999))+'.png', b1)
        # Digit 2: columns 81..101 (21 px wide).
        b2 = np.zeros((Bpp.shape[0], 21))
        for i in range(81, 102):
            for j in range(0, Bpp.shape[0]):
                b2[j][i-81] = Bpp[j][i]
        cv2.imwrite(outpath+'%d' % (time.time()*1000)+str(random.randint(1000, 9999))+'.png', b2)
        # Digit 3: columns 102..122 (21 px wide).
        b3 = np.zeros((Bpp.shape[0], 21))
        for i in range(102, 123):
            for j in range(0, Bpp.shape[0]):
                b3[j][i-102] = Bpp[j][i]
        cv2.imwrite(outpath+'%d' % (time.time()*1000)+str(random.randint(1000, 9999))+'.png', b3)
        # Digit 4: columns 124..144 (21 px wide).
        b4 = np.zeros((Bpp.shape[0], 21))
        for i in range(124, 145):
            for j in range(0, Bpp.shape[0]):
                b4[j][i-124] = Bpp[j][i]
        cv2.imwrite(outpath+'%d' % (time.time()*1000)+str(random.randint(1000, 9999))+'.png', b4)
        # Digit 5: columns 145..167 (23 px wide).
        b5 = np.zeros((Bpp.shape[0], 23))
        for i in range(145, 168):
            for j in range(0, Bpp.shape[0]):
                b5[j][i-145] = Bpp[j][i]
        cv2.imwrite(outpath+'%d' % (time.time()*1000)+str(random.randint(1000, 9999))+'.png', b5)
        return (b1, b2, b3, b4, b5)
    def InterferPoint(self, Bpp, filename):
        """Erase interference pixels from the left strip (columns 0-19).

        Near-duplicate of the per-column trace inside InterferLine, but
        restricted to columns 0..19 and without the margin whitening.
        The image is modified in place, dumped to '1.jpg' for debugging,
        and returned.  ``filename`` is unused.
        """
        m = 1  # current row cursor; carries over between columns
        n = 1  # row at/below the located dark segment
        for i in range(0, 20):
            # Move the cursor until a dark pixel pair is located.
            while (m < Bpp.shape[0]-1):
                if Bpp[m][i] == 0:
                    if Bpp[m+1][i] == 0:
                        n = m+1
                    elif m > 0 and Bpp[m-1][i] == 0:
                        n = m
                        m = n-1
                    else:
                        n = m+1
                    break
                elif m != Bpp.shape[0]:
                    # NOTE(review): as in InterferLine, both counting loops
                    # walk upward, so l == k always -- likely a copy-paste
                    # slip carried over from that method.
                    l = 0
                    k = 0
                    ll = m
                    kk = m
                    while (ll > 0):
                        if Bpp[ll][i] == 0:
                            ll = ll-1
                            l = l+1
                        else:
                            break
                    while (kk > 0):
                        if Bpp[kk][i] == 0:
                            kk = kk-1
                            k = k+1
                        else:
                            break
                    if (l <= k and l != 0) or (k == 0 and l != 0):
                        m = m-1
                    else:
                        m = m+1
                else:
                    break
            if m > 0 and Bpp[m-1][i] == 0 and Bpp[n-1][i] == 0:
                # Part of a taller stroke: keep it.
                continue
            else:
                # Isolated interference pixels: erase.
                Bpp[m][i] = 255
                Bpp[n][i] = 255
        # Debug dump; overwrites the same file on every call.
        cv2.imwrite('1.jpg', Bpp)
        return Bpp
if __name__ == '__main__':
    # Batch 1: run the full preprocessing pipeline (gray -> binary ->
    # line removal -> cell cutting) over every captcha under inpath.
    inpath = 'E:\pest1\\nacao'
    PP = PreProcess()
    for root, dirs, files in os.walk(inpath):
        for filename in files:
            Img = cv2.imread(root + '/' + filename)
            GrayImage = PP.ConvertToGray(Img, filename)
            # cv2.imshow('image',GrayImage)
            # cv2.waitKey (0)
            Bpp = PP.ConvertToBpp(GrayImage, filename)
            # NOTE(review): these steps appear to mutate Bpp in place, so
            # each call sees the previous step's cleanup -- confirm.
            Bpp_new = PP.InterferLine(Bpp, filename)
            Bpp_r = PP.RemoveLine(Bpp, filename)
            b = PP.CutImage(Bpp, filename)
    # Batch 2: a second dataset gets only point cleanup, with the result
    # written out under a timestamp+random name.
    inpath2 = 'E:\pest1\\nacao1'
    outpath2 = 'E:\pest1\\nacao3\\'
    for root, dirs, files in os.walk(inpath2):
        for filename in files:
            Img = cv2.imread(root + '/' + filename)
            GrayImage = PP.ConvertToGray(Img, filename)
            Bpp = PP.ConvertToBpp(GrayImage, filename)
            p = PP.InterferPoint(Bpp, filename)
            cv2.imwrite(outpath2 + '%d' % (time.time() * 1000) + str(random.randint(1000, 9999)) + '.png', p)
| [
"18801791073@163.com"
] | 18801791073@163.com |
e03823bb1b0db26108a8bda4155029fbfe027a13 | 0ba4cb23671ef141b530b42892c3904bf035c26b | /examples/mybot.py | 66279362900805dd4beafa9b42e9a2d4288654a8 | [] | no_license | Mika64/irc3 | 02b52904b008ee6076fc1fc564e0e7b2e3385777 | f21e2e2ac482e9a30b81f89d27367a49121a790b | refs/heads/master | 2021-01-15T09:08:43.126775 | 2014-05-01T14:53:07 | 2014-05-01T14:53:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | # -*- coding: utf-8 -*-
import logging.config
from irc3.plugins.command import command
import logging
import irc3
@irc3.plugin
class MyPlugin:
    """A plugin is a class which take the IrcBot as argument
    """

    def __init__(self, bot):
        # Keep references to the bot and its logger for later use.
        self.bot = bot
        self.log = self.bot.log

    @irc3.event(irc3.rfc.JOIN)
    def welcome(self, mask, channel):
        """Welcome people who join a channel"""
        bot = self.bot
        if mask.nick != self.bot.nick:
            # Someone else joined: greet them after a human-like delay.
            bot.call_with_human_delay(
                bot.privmsg, channel, 'Welcome %s!' % mask.nick)
        else:
            # The bot itself joined: say hello to the whole channel.
            bot.call_with_human_delay(
                bot.privmsg, channel, "Hi guys!")

    @command
    def echo(self, mask, target, args):
        """Echo command

            %%echo <words>...
        """
        # NOTE(review): irc3 parses this docstring for the command's
        # argument spec, so the "%%echo <words>..." line must stay as-is.
        # Replies privately to the sender with the words they passed.
        self.bot.privmsg(mask.nick, ' '.join(args['<words>']))

    @irc3.extend
    def my_usefull_command(self):
        """The extend decorator will allow you to call::

            >>> bot.my_usefull_command()
        """
def main():
    """Configure logging, build the IRC bot and run it until stopped."""
    logging.config.dictConfig(irc3.config.LOGGING)
    plugins = [
        'irc3.plugins.core',
        'irc3.plugins.command',
        'irc3.plugins.human',
        __name__,  # registers MyPlugin from this module
    ]
    bot = irc3.IrcBot(
        nick='irc3',
        autojoins=['#irc3'],
        host='irc.undernet.org',
        port=6667,
        ssl=False,
        includes=plugins,
    )
    bot.run()


if __name__ == '__main__':
    main()
| [
"gael@gawel.org"
] | gael@gawel.org |
6fe6f2b7352f3fc6f5888617910a5fc2aa936cb3 | 2276e1797b87b59e4b46af7cbcb84e920f5f9a92 | /Python/Best Time to Buy and Sell Stock II.py | 8cd02052d473ccf42a41121ad0539327088ee6f4 | [] | no_license | ZhengyangXu/LintCode-1 | dd2d6b16969ed4a39944e4f678249f2e67f20e0a | bd56ae69b4fa6a742406ec3202148b39b8f4c035 | refs/heads/master | 2020-03-18T04:44:31.094572 | 2016-01-10T00:20:44 | 2016-01-10T00:20:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | """
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit.
You may complete as many transactions as you like (ie, buy one and sell one share of the stock multiple times).
However, you may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).
"""
class Solution:
    """
    @param prices: Given an integer array
    @return: Maximum profit
    """
    def maxProfit(self, prices):
        """Greedy: bank every positive day-over-day price increase.

        Unlimited transactions are allowed, so the optimum equals the
        sum of all positive consecutive differences.
        """
        if not prices:
            return 0
        gains = (today - yesterday
                 for yesterday, today in zip(prices, prices[1:]))
        return sum(g for g in gains if g > 0)
| [
"anthonyjin0619@gmail.com"
] | anthonyjin0619@gmail.com |
8c7cbee9d2cc83756d0ac306ffcc3fc0f20ffb50 | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/comtypes/test/test_createwrappers.py | e0dafe908c7b84e4ef0a488ed37fe7d3931eec0f | [
"MIT"
] | permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 4,063 | py | from __future__ import print_function
import glob
import os
import unittest
import warnings
import comtypes.client
import comtypes.client._generate
import comtypes.typeinfo
def setUpModule():
    # The whole module is disabled: it programmatically generates
    # thousands of wrapper tests and dozens of them are known to fail.
    raise unittest.SkipTest("I have no idea what to do with this. It programmatically creates "
                            "*thousands* of tests and a few dozen of them fail.")
    # requires("typelibs")


# filter warnings about interfaces without a base interface; they will
# be skipped in the code generation.
warnings.filterwarnings("ignore",
                        "Ignoring interface .* which has no base interface",
                        UserWarning)

# don't print messages when typelib wrappers are generated
comtypes.client._generate.__verbose__ = False

# Windows directories scanned below for typelib-bearing binaries.
sysdir = os.path.join(os.environ["SystemRoot"], "system32")
progdir = os.environ["ProgramFiles"]
common_progdir = os.environ["CommonProgramFiles"]
# This test takes quite some time. It tries to build wrappers for ALL
# .dll, .tlb, and .ocx files in the system directory which contain typelibs.
class Test(unittest.TestCase):
    # Test methods are attached dynamically by add_test() below.

    def setUp(self):
        "Do not write the generated files into the comtypes.gen directory"
        # gen_dir=None keeps generated wrapper modules in memory only.
        comtypes.client.gen_dir = None

    def tearDown(self):
        # Restore the default generation directory for other test modules.
        comtypes.client.gen_dir = comtypes.client._find_gen_dir()
# Running counter used to give each generated test a unique name.
number = 0


def add_test(fname):
    """Attach a test method to Test that builds a wrapper for *fname*."""
    global number

    def test(self):
        try:
            comtypes.typeinfo.LoadTypeLibEx(fname)
        except WindowsError:
            # Not a loadable type library; nothing to generate.
            return
        comtypes.client.GetModule(fname)

    attr_name = "test_%d" % number
    test.__doc__ = "test GetModule(%r)" % fname
    setattr(Test, attr_name, test)
    number += 1
# Register one generated test per candidate binary found on this machine.
for fname in glob.glob(os.path.join(sysdir, "*.ocx")):
    add_test(fname)

for fname in glob.glob(os.path.join(sysdir, "*.tlb")):
    add_test(fname)

# Office type libraries; a few are excluded because generating their
# wrappers raises UnicodeEncodeError.
# NOTE(review): "grde50.olb" appears twice in both exclusion tuples.
for fname in glob.glob(os.path.join(progdir, r"Microsoft Office\Office*\*.tlb")):
    if os.path.basename(fname).lower() in (
            "grde50.olb", # UnicodeEncodeError
            "xl5de32.olb", # UnicodeEncodeError
            "grde50.olb", # UnicodeEncodeError
            ):
        continue
    add_test(fname)

for fname in glob.glob(os.path.join(progdir, r"Microsoft Office\Office*\*.olb")):
    if os.path.basename(fname).lower() in (
            "grde50.olb", # UnicodeEncodeError
            "xl5de32.olb", # UnicodeEncodeError
            "grde50.olb", # UnicodeEncodeError
            ):
        continue
    add_test(fname)

# Optional: the DIA SDK debug-information DLL, if VS.NET 2003 is installed.
path = os.path.join(progdir, r"Microsoft Visual Studio .NET 2003\Visual Studio SDKs\DIA SDK\bin\msdia71.dll")
if os.path.isfile(path):
    print("ADD", path)
    add_test(path)

for fname in glob.glob(os.path.join(common_progdir, r"Microsoft Shared\Speech\*.dll")):
    add_test(fname)

for fname in glob.glob(os.path.join(sysdir, "*.dll")):
    # these typelibs give errors:
    if os.path.basename(fname).lower() in (
            "syncom.dll", # interfaces without base interface
            "msvidctl.dll", # assignment to None
            "scardssp.dll", # assertionerror sizeof()
            "sccsccp.dll", # assertionerror sizeof()

            # Typeinfo in comsvcs.dll in XP 64-bit SP 1 is broken.
            # Oleview decompiles this code snippet (^ marks are m):
            #[
            #  odl,
            #  uuid(C7B67079-8255-42C6-9EC0-6994A3548780)
            #]
            #interface IAppDomainHelper : IDispatch {
            #    HRESULT _stdcall pfnShutdownCB(void* pv);
            #    HRESULT _stdcall Initialize(
            #                    [in] IUnknown* pUnkAD,
            #                    [in] IAppDomainHelper __MIDL_0028,
            #                         ^^^^^^^^^^^^^^^^
            #                    [in] void* pPool);
            #    HRESULT _stdcall pfnCallbackCB(void* pv);
            #    HRESULT _stdcall DoCallback(
            #                    [in] IUnknown* pUnkAD,
            #                    [in] IAppDomainHelper __MIDL_0029,
            #                         ^^^^^^^^^^^^^^^^
            #                    [in] void* pPool);
            #};
            "comsvcs.dll",
            ):
        continue
    add_test(fname)

if __name__ == "__main__":
    unittest.main()
| [
"joao.a.severgnini@gmail.com"
] | joao.a.severgnini@gmail.com |
9d568ee0da0b7d38e5e42f909ce7e3d97e831202 | c085578abc19db18ee0766e1f9598d79a3acdbe1 | /290-Word-Pattern/solution.py | d8cff43f74d5110e9f754da90d549719e16ebaca | [
"MIT"
] | permissive | Tanych/CodeTracking | efb6245edc036d7edf85e960972c34d03b8c707a | 86f1cb98de801f58c39d9a48ce9de12df7303d20 | refs/heads/master | 2020-05-21T17:40:10.105759 | 2016-10-09T18:20:42 | 2016-10-09T18:20:42 | 60,616,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | class Solution(object):
def wordPattern(self, pattern, strs):
"""
:type pattern: str
:type str: strs
:rtype: bool
"""
if not pattern and not strs:
return True
strlist=strs.split(" ")
if len(strlist)!=len(pattern):
return False
# chars map
charmap=[None]*26
plist=list(pattern)
while len(plist):
string,ch=strlist.pop(),plist.pop()
# get the index
index=ord(ch)-97
if charmap[index]!=string and charmap[index]:
return False
elif charmap[index]!=string and string in charmap:
return False
elif string not in charmap:
charmap[index]=string
return True
| [
"ychtan@email.gwu.edu"
] | ychtan@email.gwu.edu |
a643e7b0f7385c7628d0d02dc81cde3902e637f6 | 1c91439673c898c2219ee63750ea05ff847faee1 | /tools/deployment/pytorch2torchscript.py | f261b7c952602bc3c48f6f0cfaa8465bfccdb901 | [
"Apache-2.0"
] | permissive | ChenhongyiYang/GPViT | d7ba7f00d5139a989a999664ab0874c5c9d53d4d | 2b8882b2da41d4e175fe49a33fcefad1423216f4 | refs/heads/main | 2023-06-08T00:10:07.319078 | 2023-05-26T15:52:54 | 2023-05-26T15:52:54 | 577,075,781 | 78 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,364 | py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from functools import partial
import mmcv
import numpy as np
import torch
from mmcv.runner import load_checkpoint
from torch import nn
from mmcls.models import build_classifier
torch.manual_seed(3)
def _demo_mm_inputs(input_shape: tuple, num_classes: int):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
gt_labels = rng.randint(
low=0, high=num_classes, size=(N, 1)).astype(np.uint8)
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(False),
'gt_labels': torch.LongTensor(gt_labels),
}
return mm_inputs
def pytorch2torchscript(model: nn.Module, input_shape: tuple, output_file: str,
                        verify: bool):
    """Export Pytorch model to TorchScript model through torch.jit.trace and
    verify the outputs are same between Pytorch and TorchScript.

    Args:
        model (nn.Module): Pytorch model we want to export.
        input_shape (tuple): Use this input shape to construct
            the corresponding dummy input and execute the model.
        output_file (string): The path to where we store the output
            TorchScript model.
        verify (bool): Whether compare the outputs between Pytorch
            and TorchScript through loading generated output_file.
    """
    model.cpu().eval()

    num_classes = model.head.num_classes
    mm_inputs = _demo_mm_inputs(input_shape, num_classes)

    imgs = mm_inputs.pop('imgs')
    # Split the batch into single-image tensors (each keeps a batch dim of 1).
    img_list = [img[None, :] for img in imgs]

    # replace original forward function
    # Tracing goes through model.__call__, which dispatches to this partial,
    # so the traced graph sees a single-tensor signature.
    origin_forward = model.forward
    model.forward = partial(model.forward, img_metas={}, return_loss=False)

    with torch.no_grad():
        trace_model = torch.jit.trace(model, img_list[0])
        save_dir, _ = osp.split(output_file)
        if save_dir:
            os.makedirs(save_dir, exist_ok=True)
        trace_model.save(output_file)
        print(f'Successfully exported TorchScript model: {output_file}')
    # Restore the unwrapped forward for any later use of the model.
    model.forward = origin_forward

    if verify:
        # load by torch.jit
        jit_model = torch.jit.load(output_file)
        # check the numerical value
        # get pytorch output
        pytorch_result = model(img_list, img_metas={}, return_loss=False)[0]
        # get jit output
        jit_result = jit_model(img_list[0])[0].detach().numpy()
        if not np.allclose(pytorch_result, jit_result):
            raise ValueError(
                'The outputs are different between Pytorch and TorchScript')
        print('The outputs are same between Pytorch and TorchScript')
def parse_args():
    """Build and parse the command line of the MMCls->TorchScript converter."""
    parser = argparse.ArgumentParser(
        description='Convert MMCls to TorchScript')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', type=str, help='checkpoint file')
    parser.add_argument(
        '--verify',
        default=False,
        action='store_true',
        help='verify the TorchScript model')
    parser.add_argument('--output-file', type=str, default='tmp.pt')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[224, 224],
        help='input image size')
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()

    # Normalise --shape into a full NCHW input shape: batch=1, 3 channels.
    if len(args.shape) == 1:
        # Single value -> square image.
        input_shape = (1, 3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (
            1,
            3,
        ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')

    cfg = mmcv.Config.fromfile(args.config)
    # Don't trigger a pretrained-weight download during conversion.
    cfg.model.pretrained = None

    # build the model and load checkpoint
    classifier = build_classifier(cfg.model)

    if args.checkpoint:
        load_checkpoint(classifier, args.checkpoint, map_location='cpu')

    # convert model to TorchScript file
    pytorch2torchscript(
        classifier,
        input_shape,
        output_file=args.output_file,
        verify=args.verify)
| [
"chenhongyiyang@Chenhongyis-MacBook-Pro.local"
] | chenhongyiyang@Chenhongyis-MacBook-Pro.local |
4e26b65bf4b0d1429e7f5b31c70652efb3ce0222 | 3e00e7fa0e2d41911fe91d858e0a9d2d0c1367c3 | /excercises/Closures and Decorators/Decorators 2 Name Directory.py | 4a868d9bed83072b64d4024cf67d5523db44cc90 | [] | no_license | Marius-Juston/Python-Hackerrank | 544867b4e85da2b40016b6e6d1ae403f991a554d | ad623d0dd21a89c64dc870b3d19332df390c436e | refs/heads/master | 2021-06-27T00:13:04.832916 | 2020-09-26T05:44:24 | 2020-09-26T05:44:24 | 150,328,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | def person_lister(f):
def inner(people: list):
people.sort(key=lambda x: int(x[2]))
return (f(p) for p in people)
return inner
@person_lister
def name_format(person):
return ("Mr. " if person[3] == "M" else "Ms. ") + person[0] + " " + person[1]
if __name__ == '__main__':
people = [input().split() for i in range(int(input()))]
print(*name_format(people), sep='\n')
| [
"Marius.juston@hotmail.fr"
] | Marius.juston@hotmail.fr |
ee343f08234ead3a6d75d6b2c4124b64188600ee | 1a5a9bfa6ee62c328fc6ab828ad743c555b0f23a | /catagory/JianzhiOffer/stage-08/0362-sliding-window-maximum.py | d4b60de378c963f1dbb38c482851fa989e3160f8 | [] | no_license | zzy1120716/my-nine-chapter | 04b3e4d43a0d8086e5c958b81a3dc4356622d65f | c7bf3eed366b91d6bdebb79d0f11680cf7c18344 | refs/heads/master | 2020-03-30T03:07:14.748145 | 2019-05-15T13:07:44 | 2019-05-15T13:07:44 | 150,670,072 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | py | """
362. 滑动窗口的最大值
中文English
给出一个可能包含重复的整数数组,和一个大小为 k 的滑动窗口,
从左到右在数组中滑动这个窗口,找到数组中每个窗口内的最大值。
样例
给出数组 [1,2,7,7,8], 滑动窗口大小为 k = 3. 返回 [7,7,8].
解释:
最开始,窗口的状态如下:
[|1, 2 ,7| ,7 , 8], 最大值为 7;
然后窗口向右移动一位:
[1, |2, 7, 7|, 8], 最大值为 7;
最后窗口再向右移动一位:
[1, 2, |7, 7, 8|], 最大值为 8.
挑战
O(n)时间,O(k)的额外空间
"""
from collections import deque
class Solution:
    """
    @param nums: A list of integers.
    @param k: An integer
    @return: The maximum number inside the window at each moving.
    """

    def maxSlidingWindow(self, nums, k):
        """Monotonic-deque sweep: O(n) time, O(k) extra space.

        The deque stores indices whose values are decreasing, so the
        current window maximum is always nums[deque[0]].
        """
        if not nums:
            return []
        window = deque()
        # Prime the deque with the first k elements.
        for idx in range(k):
            self.push(nums, window, idx)
        maxima = [nums[window[0]]]
        # Slide: drop the index that left the window, admit the new one.
        for idx in range(k, len(nums)):
            if window[0] <= idx - k:
                window.popleft()
            self.push(nums, window, idx)
            maxima.append(nums[window[0]])
        return maxima

    def push(self, nums, stack, i):
        """Pop trailing indices dominated by nums[i], then append i."""
        while stack and nums[stack[-1]] < nums[i]:
            stack.pop()
        stack.append(i)


if __name__ == '__main__':
    print(Solution().maxSlidingWindow([1, 2, 7, 7, 8], 3))
| [
"zzy1120716@126.com"
] | zzy1120716@126.com |
bc0eded4ab8b63a7876ed549115535c50a2aa105 | 383fe2d9b3d2c6adf315ae547226a57f2a8921f1 | /trunk/Communities/content/dc.py | f5b7377a099d5ec3ee705e81e992f2f2d0b52cbe | [] | no_license | BGCX261/zmetadata-svn-to-git | b03602998893dbcfe18581539735d32a17d24da7 | 1270067f91c4c61423042bad15086e2240bcdb4c | refs/heads/master | 2021-03-12T20:10:53.933178 | 2015-08-25T15:21:09 | 2015-08-25T15:21:09 | 41,587,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | # -*- coding: utf-8 -*-
#
# File: dc.py
#
# Copyright (c) 2009 by []
# Generator: ArchGenXML Version 2.3
# http://plone.org/products/archgenxml
#
# GNU General Public License (GPL)
#
__author__ = """unknown <unknown>"""
__docformat__ = 'plaintext'
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import *
from zope.interface import implements
import interfaces
from Products.Communities.content.setup import STDSetup
from Products.Communities.content.dcfields import DCFields
from Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin
from Products.Communities.config import *
##code-section module-header #fill in your manual code here
##/code-section module-header
schema = Schema((
),
)
##code-section after-local-schema #fill in your manual code here
##/code-section after-local-schema
DCSetup_schema = BaseSchema.copy() + \
getattr(STDSetup, 'schema', Schema(())).copy() + \
schema.copy()
##code-section after-schema #fill in your manual code here
##/code-section after-schema
class DCSetup(STDSetup, BrowserDefaultMixin):
    """Archetypes content type holding the Dublin Core metadata setup.

    Generated by ArchGenXML; the marker comments below delimit regions
    preserved across regeneration and must not be removed.
    """
    security = ClassSecurityInfo()

    implements(interfaces.IDCSetup)

    meta_type = 'DCSetup'
    # Rename the object to its id after creation (Archetypes behavior).
    _at_rename_after_creation = True

    schema = DCSetup_schema

    ##code-section class-header #fill in your manual code here
    # Field definitions shared with the Dublin Core setup machinery.
    _Fields = DCFields
    ##/code-section class-header

    # Methods


# Register the type with Archetypes under this product's name.
registerType(DCSetup, PROJECTNAME)
# end of class DCSetup
##/code-section module-footer
| [
"you@example.com"
] | you@example.com |
1444aa825e32179614189c689696bc11a5dd6ef3 | 3b9d763180410bf0abf5b9c37391a64319efe839 | /toontown/coghq/CashbotMintLavaRoomFoyer_Action00.py | d64f18cef53a6cc4ab3181758e21a4c3930593ea | [] | no_license | qphoton/Reverse_Engineering_Project_ToonTown | 442f15d484324be749f6f0e5e4e74fc6436e4e30 | 11468ab449060169191366bc14ff8113ee3beffb | refs/heads/master | 2021-05-08T00:07:09.720166 | 2017-10-21T02:37:22 | 2017-10-21T02:37:22 | 107,617,661 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,306 | py | # File: C (Python 2.4)
from toontown.coghq.SpecImports import *
GlobalEntities = {
1000: {
'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_10/models/cashbotHQ/ZONE18a',
'wantDoors': 1 },
1001: {
'type': 'editMgr',
'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None },
0: {
'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [] },
10009: {
'type': 'attribModifier',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10008,
'attribName': 'modelPath',
'recursive': 1,
'typeName': 'model',
'value': '' },
10017: {
'type': 'attribModifier',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10008,
'attribName': 'scale',
'recursive': 1,
'typeName': 'model',
'value': 'Vec3(.955,1,1)' },
10015: {
'type': 'crate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10014,
'pos': Point3(0.0, 0.0, 0.0),
'scale': 0.92000000000000004,
'crushCellId': None,
'gridId': 10014,
'modelType': 1,
'pushable': 1 },
10014: {
'type': 'grid',
'name': 'crateGrid',
'comment': '',
'parentEntId': 10003,
'pos': Point3(-6.7323083877599998, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'cellSize': 3.0,
'numCol': 4,
'numRow': 2 },
10005: {
'type': 'healBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(19.0611743927, -20.782667159999999, 0.0),
'hpr': Vec3(160.01689147900001, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'rewardPerGrab': 8,
'rewardPerGrabMax': 0 },
10001: {
'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10000,
'pos': Point3(-7.8967208862299998, 21.012916564899999, 0.0),
'hpr': Vec3(180.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_F1.bam' },
10002: {
'type': 'model',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 10000,
'pos': Point3(-17.873947143599999, 16.280229568500001, 0.0),
'hpr': Vec3(270.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_E.bam' },
10006: {
'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(20.917299270600001, 20.209445953399999, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/CBMetalCrate.bam' },
10007: {
'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10000,
'pos': Point3(-18.3651504517, -19.269884109500001, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_C1.bam' },
10018: {
'type': 'model',
'name': 'middle',
'comment': '',
'parentEntId': 10008,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam' },
10019: {
'type': 'model',
'name': 'copy of middle',
'comment': '',
'parentEntId': 10008,
'pos': Point3(-5.7235732078600003, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam' },
10020: {
'type': 'model',
'name': 'copy of middle',
'comment': '',
'parentEntId': 10008,
'pos': Point3(5.7199997901900002, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam' },
10021: {
'type': 'model',
'name': 'copy of middle',
'comment': '',
'parentEntId': 10008,
'pos': Point3(11.4399995804, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam' },
10000: {
'type': 'nodepath',
'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1 },
10003: {
'type': 'nodepath',
'name': 'cratePuzzle',
'comment': '',
'parentEntId': 0,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': 1 },
10008: {
'type': 'nodepath',
'name': 'wall',
'comment': '',
'parentEntId': 0,
'pos': Point3(13.4399995804, 6.57999992371, 0.0),
'hpr': Point3(270.0, 0.0, 0.0),
'scale': Vec3(1.95812249184, 1.5, 1.7999999523200001) },
10016: {
'type': 'stomper',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10014,
'pos': Point3(-4.0493636131299997, 3.45528435707, 0.0),
'hpr': Point3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'crushCellId': None,
'damage': 6,
'headScale': Point3(4.0, 3.0, 4.0),
'modelPath': 0,
'motion': 3,
'period': 5.0,
'phaseShift': 0.0,
'range': 15.0,
'shaftScale': Point3(0.75, 10.0, 0.75),
'soundLen': 0,
'soundOn': 1,
'soundPath': 1,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0 } }
Scenario0 = { }
levelSpec = {
'globalEntities': GlobalEntities,
'scenarios': [
Scenario0] }
| [
"Infinitywilee@rocketmail.com"
] | Infinitywilee@rocketmail.com |
197fcceffaa5b82ddd7b54391447a2a72d81ed69 | 7bc54bae28eec4b735c05ac7bc40b1a8711bb381 | /src/trainer_v2/custom_loop/demo/demo.py | 20066d8f5c923610ff3b9c75a1dfac1c5d125122 | [] | no_license | clover3/Chair | 755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e | a2102ebf826a58efbc479181f1ebb5de21d1e49f | refs/heads/master | 2023-07-20T17:29:42.414170 | 2023-07-18T21:12:46 | 2023-07-18T21:12:46 | 157,024,916 | 0 | 0 | null | 2023-02-16T05:20:37 | 2018-11-10T21:55:29 | Python | UTF-8 | Python | false | false | 2,906 | py | import os
import sys
import numpy as np
from data_generator.tokenizer_wo_tf import get_tokenizer, pretty_tokens
from trainer_v2.custom_loop.modeling_common.tf_helper import distribute_dataset
from trainer_v2.custom_loop.neural_network_def.siamese import ModelConfig200_200
from trainer_v2.custom_loop.train_loop_helper import get_strategy_from_config
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from taskman_client.wrapper3 import report_run3
from trainer_v2.chair_logging import c_log
from trainer_v2.custom_loop.dataset_factories import get_two_seg_data
from trainer_v2.custom_loop.run_config2 import get_run_config2_nli, RunConfig2
from trainer_v2.train_util.arg_flags import flags_parser
import tensorflow as tf
from keras import backend as K
def load_local_decision_nli(model_path):
    """Load a saved NLI model and expose its local-decision layer.

    Returns a Keras backend function mapping the model input to
    (local decision layer output, final model outputs).
    """
    model = tf.keras.models.load_model(model_path)
    # Layer index 12 is assumed to emit the per-segment (local)
    # decisions -- tied to this specific saved architecture.
    decision_layer = model.layers[12]
    print("Local decision layer", decision_layer.name)
    outputs = [decision_layer.output, model.outputs]
    return K.function([model.input, ], outputs)  # evaluation function
@report_run3
def main(args):
    """Interactively inspect local vs. global NLI decisions on eval data.

    Loads the saved model, runs 10 evaluation batches through it, and for
    each example prints the prediction, the decoded premise/hypothesis
    tokens and the per-segment (local) decision scores, pausing for the
    user between batches.
    """
    c_log.info("Start {}".format(__file__))
    run_config: RunConfig2 = get_run_config2_nli(args)
    model_config = ModelConfig200_200()
    strategy = get_strategy_from_config(run_config)

    model_path = run_config.eval_config.model_save_path
    # Keras backend function: x -> (local decisions, final outputs).
    fun = load_local_decision_nli(model_path)

    def dataset_factory(input_files, is_for_training):
        # Two-segment dataset matching the 200/200 model config.
        return get_two_seg_data(input_files, run_config, model_config, is_for_training)

    tokenizer = get_tokenizer()
    eval_dataset = dataset_factory(run_config.dataset_config.eval_files_path, False)
    eval_dataset = eval_dataset.take(10)
    eval_dataset = distribute_dataset(strategy, eval_dataset)
    batch_size = run_config.common_run_config.batch_size
    iterator = iter(eval_dataset)
    for batch in iterator:
        x, y = batch
        z, z_label_l = fun(x)
        z_label = z_label_l[0]
        input_ids1, _, input_ids2, _ = x
        for i in range(batch_size):
            pred = np.argmax(z_label[i])
            print("Pred: ", pred, " label :", y[i])
            tokens = tokenizer.convert_ids_to_tokens(input_ids1.numpy()[i])
            print("prem: ", pretty_tokens(tokens, True))
            # NOTE(review): input_ids2 appears to hold two 100-token
            # hypothesis segments concatenated -- confirm against the
            # ModelConfig200_200 layout.
            input_ids2_np = input_ids2.numpy()[i]
            tokens = tokenizer.convert_ids_to_tokens(input_ids2_np[:100])
            print("hypo1: ", pretty_tokens(tokens, True))
            tokens = tokenizer.convert_ids_to_tokens(input_ids2_np[100:])
            print("hypo2: ", pretty_tokens(tokens, True))
            print("local decisions: ", np.argmax(z[i], axis=1))
            print(z[i])
            print()
        input("Press enter to continue")


if __name__ == "__main__":
    args = flags_parser.parse_args(sys.argv[1:])
    main(args)
| [
"lesterny@gmail.com"
] | lesterny@gmail.com |
5e430023a77a7b01f693a1109ec471faaa60eb9c | 084a13b6524e21914826e842eeefefd09570a970 | /experiments/atari_easy/space_invaders/models/dqn_baseline/src/model.py | 4a031ef63acf4fda3d6e37fc7cea3c4ff9e410bb | [
"MIT"
] | permissive | michalnand/reinforcement_learning | 28aa0e2c92b6112cf366eff0e0d6a78b9a56e94f | 01635014a37a4c871766b4cdd2caaa26a0c2d8cc | refs/heads/main | 2023-06-01T10:27:36.601631 | 2023-02-12T19:46:01 | 2023-02-12T19:46:01 | 217,841,101 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,771 | py | import torch
import torch.nn as nn
class NoisyLinear(torch.nn.Module):
    """Linear layer with factorised parameter noise (NoisyNet-style).

    Weights are stored as (in_features, out_features); the forward pass
    perturbs them with learnable noise magnitudes scaled by *sigma*.
    """

    def __init__(self, in_features, out_features, sigma = 1.0):
        super(NoisyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.sigma = sigma

        # Deterministic part of the affine map.
        self.weight = nn.Parameter(torch.zeros(in_features, out_features))
        torch.nn.init.xavier_uniform_(self.weight)
        self.bias = nn.Parameter(torch.zeros(out_features))

        # Learnable per-element noise magnitudes.
        self.weight_noise = nn.Parameter(torch.zeros(in_features, out_features))
        torch.nn.init.xavier_uniform_(self.weight_noise)
        self.bias_noise = nn.Parameter((0.1/out_features)*torch.randn(out_features))

    def forward(self, x):
        # Factorised noise: a column vector times a row vector yields a full
        # (in, out) noise matrix from only in+out samples.  Sampling order
        # (columns, rows, then bias) is kept identical for reproducibility.
        noise_cols = torch.randn((1, self.out_features)).to(x.device).detach()
        noise_rows = torch.randn((self.in_features, 1)).to(x.device).detach()
        w_perturb = self.sigma*noise_rows.matmul(noise_cols)
        b_perturb = self.sigma*torch.randn((self.out_features)).to(x.device).detach()

        effective_w = self.weight + self.weight_noise*w_perturb
        effective_b = self.bias + self.bias_noise*b_perturb
        return x.matmul(effective_w) + effective_b
class Model(torch.nn.Module):
def __init__(self, input_shape, outputs_count):
super(Model, self).__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.input_shape = input_shape
self.outputs_count = outputs_count
input_channels = self.input_shape[0]
input_height = self.input_shape[1]
input_width = self.input_shape[2]
fc_inputs_count = 128*(input_width//16)*(input_height//16)
self.layers_features = [
nn.Conv2d(input_channels, 64, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
nn.Flatten()
]
self.layers_value = [
nn.Linear(fc_inputs_count, 512),
nn.ReLU(),
nn.Linear(512, 1)
]
self.layers_advantage = [
NoisyLinear(fc_inputs_count, 512),
nn.ReLU(),
NoisyLinear(512, outputs_count)
]
for i in range(len(self.layers_features)):
if hasattr(self.layers_features[i], "weight"):
torch.nn.init.xavier_uniform_(self.layers_features[i].weight)
for i in range(len(self.layers_value)):
if hasattr(self.layers_value[i], "weight"):
torch.nn.init.xavier_uniform_(self.layers_value[i].weight)
for i in range(len(self.layers_advantage)):
if hasattr(self.layers_advantage[i], "weight"):
torch.nn.init.xavier_uniform_(self.layers_advantage[i].weight)
self.model_features = nn.Sequential(*self.layers_features)
self.model_features.to(self.device)
self.model_value = nn.Sequential(*self.layers_value)
self.model_value.to(self.device)
self.model_advantage = nn.Sequential(*self.layers_advantage)
self.model_advantage.to(self.device)
print("model_dqn")
print(self.model_features)
print(self.model_value)
print(self.model_advantage)
print("\n\n")
def forward(self, state):
features = self.model_features(state)
value = self.model_value(features)
advantage = self.model_advantage(features)
result = value + advantage - advantage.mean(dim=1, keepdim=True)
return result
def save(self, path):
print("saving ", path)
torch.save(self.model_features.state_dict(), path + "model_features.pt")
torch.save(self.model_value.state_dict(), path + "model_value.pt")
torch.save(self.model_advantage.state_dict(), path + "model_advantage.pt")
def load(self, path):
print("loading ", path)
self.model_features.load_state_dict(torch.load(path + "model_features.pt", map_location = self.device))
self.model_value.load_state_dict(torch.load(path + "model_value.pt", map_location = self.device))
self.model_advantage.load_state_dict(torch.load(path + "model_advantage.pt", map_location = self.device))
self.model_features.eval()
self.model_value.eval()
self.model_advantage.eval()
    def get_activity_map(self, state):
        # Visualisation helper: upsample the final conv feature maps back
        # to the input resolution to show which regions drive the network.
        state_t = torch.tensor(state, dtype=torch.float32).detach().to(self.device).unsqueeze(0)
        features = self.model_features(state_t)
        # NOTE(review): hard-coded to the 128-channel 6x6 map this
        # architecture produces for 96x96 inputs — confirm before
        # changing the input shape or conv stack.
        features = features.reshape((1, 128, 6, 6))
        upsample = nn.Upsample(size=(self.input_shape[1], self.input_shape[2]), mode='bicubic')
        features = upsample(features).sum(dim = 1)
        result = features[0].to("cpu").detach().numpy()
        # Linear rescale so max -> 1 and min -> 0.
        # NOTE(review): divides by zero if the map is constant — confirm
        # that cannot happen in practice.
        k = 1.0/(result.max() - result.min())
        q = 1.0 - k*result.max()
        result = k*result + q
        return result
if __name__ == "__main__":
    # Smoke test: push one random batch through the model and print the
    # Q-value output shape (expected: (batch_size, actions_count)).
    batch_size = 8
    channels = 4
    height = 96
    width = 96
    actions_count = 9
    state = torch.rand((batch_size, channels, height, width))
    model = Model((channels, height, width), actions_count)
    q_values = model.forward(state)
    print(q_values.shape)
| [
"michal.nand@gmail.com"
] | michal.nand@gmail.com |
5f3cfb719b58e5c7c05e33d5bb548c5f0a306fa7 | 5168da0fb501135a3c86e4e95679f54a825d69d0 | /openquake/hazardlib/tests/gsim/allen_2012_test.py | 72173ea3d3ef0d060765778b3e310674efae37b0 | [
"AGPL-3.0-only",
"BSD-3-Clause"
] | permissive | GFZ-Centre-for-Early-Warning/shakyground | 266b29c05ea2cfff6d9d61f21b5114282c6fa117 | 0da9ba5a575360081715e8b90c71d4b16c6687c8 | refs/heads/master | 2023-06-01T21:41:11.127323 | 2018-10-09T10:31:48 | 2018-10-09T10:31:48 | 144,732,068 | 1 | 3 | BSD-3-Clause | 2019-11-18T07:58:49 | 2018-08-14T14:32:50 | Python | UTF-8 | Python | false | false | 1,264 | py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.allen_2012 import Allen2012
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
import numpy
# Test data generated from EQRM implementation.
class Allen2012TestCase(BaseGSIMTestCase):
    # GSIM under test; BaseGSIMTestCase.check() runs it against the CSV
    # reference tables (test data generated from the EQRM implementation).
    GSIM_CLASS = Allen2012

    def test_mean(self):
        # Mean ground-motion values, allowing 0.4% discrepancy.
        self.check('A12/ALLEN2012_MEAN.csv',
                   max_discrep_percentage=0.4)

    def test_std_total(self):
        # Total standard deviation values, allowing 0.1% discrepancy.
        self.check('A12/ALLEN2012_STD_TOTAL.csv',
                   max_discrep_percentage=0.1)
| [
"mhaas@gfz-potsdam.de"
] | mhaas@gfz-potsdam.de |
2d9bba4e31f58ea55ec61a6c6c7285e3cf7e8ec9 | 868cd4895a8da17a7e3e2c8da0ec9e139f8d0c30 | /model/sample/fashion/keras_fashion_save.py | db5946f50bb4623f6d9dbc90553ff5c81a33419f | [] | no_license | inJAJA/Study | 35d4e410df7b476a4c298664bb99ce9b09bf6296 | c2fd9a1e1f3a31cb3737cbb4891d848cc802f1d4 | refs/heads/master | 2022-12-21T11:41:15.396610 | 2020-09-20T23:51:45 | 2020-09-20T23:51:45 | 263,212,524 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,958 | py | # 과제 2
# Complete using the Sequential API.
# Record the resulting acc and loss in comments at the bottom.
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
from keras.utils.np_utils import to_categorical

# 1. data: fashion-MNIST, 60k train / 10k test grayscale 28x28 images.
from keras.datasets import fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
print(x_train.shape) # (60000, 28, 28)
print(x_test.shape) # (10000, 28, 28)

# x: add a single channel dimension for Conv2D.
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
print(x_train.shape) # (60000, 28, 28, 1)

# y: one-hot encode the 10 class labels.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape) # (60000, 10)
print(y_test.shape) # (10000, 10)

# 2. model: CNN with shrinking filter counts, pooling and dropout.
model = Sequential()
model.add(Conv2D(100, (3, 3), input_shape = (28, 28, 1), padding = 'same', activation = 'relu'))
model.add(MaxPooling2D(pool_size = 3))
model.add(Dropout(0.2))
model.add(Conv2D(80, (3, 3), padding = 'same', activation = 'relu'))
model.add(MaxPooling2D(pool_size = 3))
model.add(Dropout(0.2))
model.add(Conv2D(60, (3, 3), padding = 'same', activation = 'relu'))
model.add(MaxPooling2D(pool_size = 3))
model.add(Dropout(0.2))
model.add(Conv2D(40, (3, 3), padding = 'same', activation = 'relu'))
model.add(Dropout(0.2))
model.add(Conv2D(20, (3, 3), padding = 'same', activation = 'relu'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(10, activation = 'softmax'))

# Save the (untrained) architecture+weights snapshot.
""" model_save """
model.save('./model/sample/fashion/fashion_model_save.h5')

# checkpoint: keep only the model with the best validation loss.
from keras.callbacks import ModelCheckpoint
modelpath = ('./model/sample/fashion/fashion_checkpoint_best_{epoch:02d}-{val_loss:.4f}.hdf5')
checkpoint = ModelCheckpoint(filepath = modelpath, monitor = 'val_loss',
                             save_best_only = True, save_weights_only = False)

# 3. fit
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['acc'])
model.fit(x_train, y_train, epochs = 50, batch_size = 64, callbacks = [checkpoint],
          validation_split = 0.2, shuffle = True, verbose =2 )

""" save_weights """
model.save_weights('./model/sample/fashion/fashion_save_weights.h5')

# 4. evaluate
loss, acc = model.evaluate(x_test, y_test, batch_size =64)
print('loss: ', loss)
print('acc: ', acc)
# acc: 0.9114999771118164

# NOTE(review): the section below recompiles, retrains and re-evaluates
# the same model (without checkpointing) — it looks like leftover from
# editing this snapshot; confirm before relying on it.
# 3. fit
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['acc'])
model.fit(x_train, y_train, epochs = 50, batch_size = 64,
          validation_split = 0.2, shuffle = True, verbose =2 )

# 4. evaluate
loss, acc = model.evaluate(x_test, y_test, batch_size =64)
print('loss: ', loss)
print('acc: ', acc)
# acc: 0.9114999771118164
| [
"zaiin4050@gmail.com"
] | zaiin4050@gmail.com |
610c90abeea73d06318c9768d6a3ccd4ee7ca167 | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201029154124.py | 144d082f653b6fb9010adafaf907c6ceb91d68c2 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,711 | py | from django import forms
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
from wagtail.contrib.table_block.blocks import TableBlock
class TitleBlock(blocks.StructBlock):
    """Centred page-title block."""

    text = blocks.CharBlock(
        required=True,
        # BUG FIX: was misspelled `elp_text`, so the help text never
        # reached the admin form.
        help_text='Tekst do wyświetlenia',
    )

    class Meta:
        template = 'streams/title_block.html'
        # NOTE(review): 'edycja' looks like a Polish rendering of the
        # Wagtail 'edit' icon name — confirm the icon actually exists.
        icon = 'edycja'
        label = 'Tytuł'
        help_text = 'Wyśrodkowany tekst do wyświetlenia na stronie.'
class LinkValue(blocks.StructValue):
    """Extra link logic: resolve the effective URL of a Link block."""

    def url(self) -> str:
        # Prefer an internal page, fall back to an external link,
        # otherwise return an empty string.
        page = self.get('internal_page')
        if page:
            return page.url
        external = self.get('external_link')
        if external:
            return external
        return ''
# NOTE(review): removed stray, incomplete statement
# `from django.core.exception` — it was a syntax error (no `import`
# clause, and the module is spelled `django.core.exceptions`).
class Link(blocks.StructBlock):
    """Reusable link sub-block: display text plus either an internal
    page or an external URL. LinkValue.url() picks the effective target
    (internal page wins)."""

    link_text = blocks.CharBlock(
        max_length=50,
        default='Więcej szczegółów'
    )
    internal_page = blocks.PageChooserBlock(
        required=False
    )
    external_link = blocks.URLBlock(
        required=False
    )

    class Meta:
        # Wrap values in LinkValue so templates can call `value.url`.
        value_class = LinkValue
class Card(blocks.StructBlock):
    """A single card: title, text, image and a link."""

    title = blocks.CharBlock(
        max_length=100,
        help_text = 'Pogrubiony tytuł tej karty. Maksymalnie 100 znaków.'
    )
    # NOTE(review): the help text says the field is optional
    # ("Opcjonalny"), but TextBlock defaults to required=True — confirm
    # whether `required=False` was intended.
    text = blocks.TextBlock(
        max_length=255,
        help_text='Opcjonalny tekst tej karty. Maksymalnie 255 znaków.'
    )
    image = ImageChooserBlock(
        help_text = 'Obraz zostanie automatycznie przycięty o 570 na 370 pikseli'
    )
    link = Link(help_text = 'Wwybierz link')
class CardsBlock(blocks.StructBlock):
    """A repeatable list of Card blocks."""

    cards = blocks.ListBlock(
        Card()
    )

    class Meta:
        template = 'streams/card_block.html'
        icon = 'image'
        label = 'Karty standardowe'
class RadioSelectBlock(blocks.ChoiceBlock):
    """ChoiceBlock rendered with radio buttons instead of a <select>."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Swap the widget after the parent has built the form field,
        # reusing the choices it already resolved.
        self.field.widget = forms.RadioSelect(
            choices=self.field.widget.choices
        )
class ImageAndTextBlock(blocks.StructBlock):
    """Side-by-side image and text with a configurable alignment."""

    image = ImageChooserBlock(help_text='Obraz automatycznie przycięty do rozmiaru 786 na 552 px.')
    # Radio choice controls which side the image renders on.
    image_alignment = RadioSelectBlock(
        choices = (
            ('left','Opraz po lewej stronie'),
            ('right', 'Obraz po prawej stronie'),
        ),
        default = 'left',
        help_text = 'Obraz po lewej stronie, tekst po prawej lub obraz po prawej stronie tekst po lewej.'
    )
    title = blocks.CharBlock(
        max_length=60,
        help_text='Maksymalna długość 60 znaków.'
    )
    text = blocks.CharBlock(
        max_length = 140,
        required = False,
    )
    link = Link()

    class Meta:
        template = 'streams/image_and_text_block.html'
        icon = 'image'
        label = 'Obraz & Tekst'
class CallToActionBlock(blocks.StructBlock):
    """A title plus a link, rendered as a call-to-action banner."""

    title =blocks.CharBlock(
        max_length = 200,
        help_text = 'Maksymalnie 200 znaków.'
    )
    link = Link()

    class Meta:
        template = 'streams/call_to_action_block.html'
        icon = 'plus'
        label = 'Wezwanie do działania'
class PricingTableBlock(TableBlock):
    """Pricing table block (expects exactly 4 columns)."""

    class Meta:
        template = 'streams/pricing_table_block.html'
        label = 'Tabela cen'
        icon = 'table'
        help_text = 'Twoje tabele z cenami powinny zawierać zawsze 4 kolumny.'
'''
class RichTextWithTitleBlock(blocks.StructBlock):
title = blocks.CharBlock(max_length=50)
context = blocks.RichTextBlock(features=[])
class Meta:
template = 'streams/simple_richtext_block.html'
''' | [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
485bef81f64f6243691a9ec5a7e5c712d0c9c91b | bcaaf8535f639be14558216fb45cab6b4635895b | /list/020 Majority Element Efficient.py | 135fa26b6bac2c4e9bf494cf366e22bffaa22a8e | [] | no_license | RavinderSinghPB/Data-Structure-And-Algorithm-Python | 7f3b61216318e58eb58881d5181561d8e06b092b | 12a126803f4c6bee0e6dbd380604f703cf678de4 | refs/heads/main | 2023-01-25T03:47:16.264926 | 2020-12-02T04:45:02 | 2020-12-02T04:45:02 | 316,112,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | def findMajority(arr, n):
res = 0
count = 1
for i in range(1, n):
if arr[res] == arr[i]:
count += 1
else:
count -= 1
if count == 0:
res = i
count = 1
count = 0
for i in range(0, n):
if arr[res] == arr[i]:
count += 1
if count <= n // 2:
res = -1
return res
if __name__ == "__main__":
arr = [8, 7, 6, 8, 6, 6, 6, 6]
n = len(arr)
idx = findMajority(arr, n)
if idx != -1:
print(arr[idx])
| [
"ravindersingh.gfg@gmail.com"
] | ravindersingh.gfg@gmail.com |
187a0a5b1b63e6fb5ecdf4a16d709fada04e53b2 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dataartsstudio/huaweicloudsdkdataartsstudio/v1/model/create_workspace_params.py | 69a6d3bae31e48ef092dcf6aae5927f4f94f6316 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 6,989 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateWorkspaceParams:
    """Request model for creating a DataArts Studio workspace
    (SDK-generated class; follows the standard huaweicloud model shape).

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # No fields are masked in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'bad_record_location_name': 'str',
        'description': 'str',
        'eps_id': 'str',
        'job_log_location_name': 'str',
        'name': 'str'
    }

    attribute_map = {
        'bad_record_location_name': 'bad_record_location_name',
        'description': 'description',
        'eps_id': 'eps_id',
        'job_log_location_name': 'job_log_location_name',
        'name': 'name'
    }

    def __init__(self, bad_record_location_name=None, description=None, eps_id=None, job_log_location_name=None, name=None):
        """CreateWorkspaceParams

        The model defined in huaweicloud sdk

        :param bad_record_location_name: OBS path for DLI dirty (bad) records
        :type bad_record_location_name: str
        :param description: Workspace description
        :type description: str
        :param eps_id: Enterprise project id; required on the public cloud when the user has enterprise projects enabled
        :type eps_id: str
        :param job_log_location_name: OBS path for job logs
        :type job_log_location_name: str
        :param name: Workspace name
        :type name: str
        """

        self._bad_record_location_name = None
        self._description = None
        self._eps_id = None
        self._job_log_location_name = None
        self._name = None
        self.discriminator = None

        # eps_id and name are assigned unconditionally — they are the
        # required fields of this request; the rest are optional.
        if bad_record_location_name is not None:
            self.bad_record_location_name = bad_record_location_name
        if description is not None:
            self.description = description
        self.eps_id = eps_id
        if job_log_location_name is not None:
            self.job_log_location_name = job_log_location_name
        self.name = name

    @property
    def bad_record_location_name(self):
        """Gets the bad_record_location_name of this CreateWorkspaceParams.

        OBS path for DLI dirty (bad) records

        :return: The bad_record_location_name of this CreateWorkspaceParams.
        :rtype: str
        """
        return self._bad_record_location_name

    @bad_record_location_name.setter
    def bad_record_location_name(self, bad_record_location_name):
        """Sets the bad_record_location_name of this CreateWorkspaceParams.

        OBS path for DLI dirty (bad) records

        :param bad_record_location_name: The bad_record_location_name of this CreateWorkspaceParams.
        :type bad_record_location_name: str
        """
        self._bad_record_location_name = bad_record_location_name

    @property
    def description(self):
        """Gets the description of this CreateWorkspaceParams.

        Workspace description

        :return: The description of this CreateWorkspaceParams.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this CreateWorkspaceParams.

        Workspace description

        :param description: The description of this CreateWorkspaceParams.
        :type description: str
        """
        self._description = description

    @property
    def eps_id(self):
        """Gets the eps_id of this CreateWorkspaceParams.

        Enterprise project id; required on the public cloud when the user has enterprise projects enabled

        :return: The eps_id of this CreateWorkspaceParams.
        :rtype: str
        """
        return self._eps_id

    @eps_id.setter
    def eps_id(self, eps_id):
        """Sets the eps_id of this CreateWorkspaceParams.

        Enterprise project id; required on the public cloud when the user has enterprise projects enabled

        :param eps_id: The eps_id of this CreateWorkspaceParams.
        :type eps_id: str
        """
        self._eps_id = eps_id

    @property
    def job_log_location_name(self):
        """Gets the job_log_location_name of this CreateWorkspaceParams.

        OBS path for job logs

        :return: The job_log_location_name of this CreateWorkspaceParams.
        :rtype: str
        """
        return self._job_log_location_name

    @job_log_location_name.setter
    def job_log_location_name(self, job_log_location_name):
        """Sets the job_log_location_name of this CreateWorkspaceParams.

        OBS path for job logs

        :param job_log_location_name: The job_log_location_name of this CreateWorkspaceParams.
        :type job_log_location_name: str
        """
        self._job_log_location_name = job_log_location_name

    @property
    def name(self):
        """Gets the name of this CreateWorkspaceParams.

        Workspace name

        :return: The name of this CreateWorkspaceParams.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this CreateWorkspaceParams.

        Workspace name

        :param name: The name of this CreateWorkspaceParams.
        :type name: str
        """
        self._name = name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask fields listed in sensitive_list (none here).
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Legacy Python-2 path kept from the generator template.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateWorkspaceParams):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
5a55cace1700df5eeea3d479465e72d438fb341c | 2cd86616a2d29b2a19ddb906c5216ed7a48d2208 | /biobb_vs/gromacs_wrapper/genion.py | 83452c81f210fea238442bc7e2bbbfc922723b74 | [
"Apache-2.0"
] | permissive | bioexcel/biobb_vs_alpha | e0c8ab1bad864bd3a87cafa1ee7f6eddc50ee3ae | 5a7403bad0935ee4380c377d930bd24967770501 | refs/heads/master | 2021-09-26T00:24:15.473393 | 2018-10-26T12:57:16 | 2018-10-26T12:57:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,457 | py | #!/usr/bin/env python
"""Python wrapper for the GROMACS genion module
"""
import sys
import json
import configuration.settings as settings
from command_wrapper import cmd_wrapper
from tools import file_utils as fu
class Genion(object):
    """Wrapper for the 5.1.2 version of the genion module

    Args:
        input_tpr_path (str): Path to the input portable run input TPR file.
        output_gro_path (str): Path to the input structure GRO file.
        input_top_zip_path (str): Path the input TOP topology in zip format.
        output_top_zip_path (str): Path the output topology TOP and ITP files zipball.
        properties (dic):
            output_top_path (str): Path the output topology TOP file.
            replaced_group (str): Group of molecules that will be replaced by the solvent.
            neutral (bool): Neutralize the charge of the system.
            concentration (float): Concentration of the ions in (mol/liter).
            seed (int): Seed for random number generator.
            gmx_path (str): Path to the GROMACS executable binary.
    """
    def __init__(self, input_tpr_path, output_gro_path, input_top_zip_path,
                 output_top_zip_path, properties, **kwargs):
        # NOTE(review): `basestring` makes this module Python-2 only.
        if isinstance(properties, basestring):
            properties=json.loads(properties)
        self.input_tpr_path = input_tpr_path
        self.output_gro_path = output_gro_path
        self.input_top_zip_path = input_top_zip_path
        self.output_top_zip_path = output_top_zip_path
        # Optional properties with their defaults.
        self.output_top_path = properties.get('output_top_path','gio.top')
        self.replaced_group = properties.get('replaced_group','SOL')
        self.neutral = properties.get('neutral',False)
        self.concentration = properties.get('concentration',0.05)
        self.seed = properties.get('seed',1993)
        self.gmx_path = properties.get('gmx_path',None)
        self.mutation = properties.get('mutation',None)
        self.step = properties.get('step',None)
        self.path = properties.get('path','')
        self.mpirun = properties.get('mpirun', False)
        self.mpirun_np = properties.get('mpirun_np', None)
        self.global_log= properties.get('global_log', None)

    def launch(self):
        """Launches the execution of the GROMACS genion module.

        Builds the `gmx genion` command line, feeds the interactive
        group selection through the shell, and re-zips the updated
        topology. Returns the process return code.
        """
        if self.global_log is not None:
            if self.concentration:
                self.global_log.info(19*' '+'To reach up '+str(self.concentration)+' mol/litre concentration')
        out_log, err_log = fu.get_logs(path=self.path, mutation=self.mutation, step=self.step)
        self.output_top_path = fu.add_step_mutation_path_to_name(self.output_top_path, self.step, self.mutation)
        # Unzip topology to topology_out
        fu.unzip_top(zip_file=self.input_top_zip_path, top_file=self.output_top_path)
        gmx = 'gmx' if self.gmx_path is None else self.gmx_path
        cmd = [gmx, 'genion',
               '-s', self.input_tpr_path,
               '-o', self.output_gro_path,
               '-p', self.output_top_path]
        # Optional mpirun wrapper: inserted in reverse so the final
        # command starts with `mpirun [-np N] gmx ...`.
        if self.mpirun_np is not None:
            cmd.insert(0, str(self.mpirun_np))
            cmd.insert(0, '-np')
        if self.mpirun:
            cmd.insert(0, 'mpirun')
        if self.neutral:
            cmd.append('-neutral')
        if self.concentration:
            cmd.append('-conc')
            cmd.append(str(self.concentration))
        if self.seed is not None:
            cmd.append('-seed')
            cmd.append(str(self.seed))
        # genion prompts for the group to replace: answer via a shell
        # here-string under mpirun, otherwise via an echo pipe.
        if self.mpirun:
            cmd.append('<<<')
            cmd.append('\"'+self.replaced_group+'\"')
        else:
            cmd.insert(0, '|')
            cmd.insert(0, '\"'+self.replaced_group+'\"')
            cmd.insert(0, 'echo')
        command = cmd_wrapper.CmdWrapper(cmd, out_log, err_log)
        returncode = command.launch()
        # Re-zip the updated topology (removes the loose files).
        fu.zip_top(self.output_top_path, self.output_top_zip_path, remove_files=True)
        return returncode
# CWL-compatible command-line entry point.
def main():
    """Read step properties from the YAML config and run Genion."""
    system = sys.argv[1]
    step = sys.argv[2]
    properties_file = sys.argv[3]
    step_properties = settings.YamlReader(properties_file, system).get_prop_dic()[step]
    genion = Genion(input_tpr_path=sys.argv[4],
                    output_gro_path=sys.argv[5],
                    input_top_zip_path=sys.argv[6],
                    output_top_zip_path=sys.argv[7],
                    properties=step_properties)
    genion.launch()

if __name__ == '__main__':
    main()
| [
"andriopau@gmail.com"
] | andriopau@gmail.com |
f78acc475c7f7428db7e6c915fe5f87224ca1fd2 | 1e177ebdcb470f738c058606ac0f86a36085f661 | /Python/AdafruitIO/PublishMQTT.py | 4768892f13f10590fc92c6a35aa4d873ecebd641 | [] | no_license | robingreig/raspi-git | 5cbdd295c1048a0571aa2c2f8576438269439f07 | 7373bf94557d7a88c8f343362ba64f9cd19c8ce7 | refs/heads/master | 2023-08-31T03:16:17.286700 | 2023-08-26T11:54:23 | 2023-08-26T11:54:23 | 16,873,881 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | #!/usr/bin/python
# Toggle the "ledStatus" MQTT topic on broker raspi13:
# publish "0", wait one second, then publish "1".
import paho.mqtt.publish as publish
import time

print("Sending 0...")
publish.single("ledStatus", "0", hostname="raspi13")
time.sleep(1)
print("Sending 1...")
publish.single("ledStatus", "1", hostname="raspi13")
"robin.greig@calalta.com"
] | robin.greig@calalta.com |
7b709a119701579dbcb5028cb79c513adfa97765 | f1fcd165cd8444310ce5d201e481e3982dc28110 | /easy/1901/190114/jang.py | bdbf8e0098b135c5249f6bb41fd103aac1ca0c6f | [] | no_license | JoosJuliet/algoStudy | 310a71a0fcc8f3c23281544cf3458ed999040176 | 3fc1e850f9d8b9f290f41fddd59ff403fbfffa05 | refs/heads/master | 2020-04-20T19:26:25.485875 | 2019-03-27T22:37:27 | 2019-03-27T22:37:27 | 169,049,593 | 1 | 0 | null | 2019-02-04T08:43:07 | 2019-02-04T08:43:07 | null | UTF-8 | Python | false | false | 264 | py | d, m, y = map(int, input().split())
d2, m2, y2 = map(int, input().split())
fine = 0
if y - y2 > 0:
fine += (y-y2)*10000
elif y - y2 == 0 and m - m2 > 0:
fine += (m-m2)*500
elif y - y2 == 0 and m - m2 == 0 and d - d2 > 0:
fine += (d-d2)*15
print(fine) | [
"wkdtjsgur100@naver.com"
] | wkdtjsgur100@naver.com |
8a974657debbb33dd868b65d2757c458567a3ffd | b0a162b1db3004b30cd735500971edea39e775ed | /wave1/Labs/Lab1of2.2.py | eec6582f29a7c4c13c1223c12938dc4853b6c6c6 | [] | no_license | geofferyj/WEJAPA_INTERNSHIP | 40da98c335affbbaf74d018d8a2f38fb30183f10 | 92a101d0280e0f732dc3cfd8727e436de86cdb62 | refs/heads/master | 2022-12-08T04:40:18.627904 | 2020-08-16T07:39:51 | 2020-08-16T07:39:51 | 286,264,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | #Quiz: Assign and Modify Variables
#Now it's your turn to work with variables. The comments in this quiz (the lines that begin with #) have instructions for creating and modifying variables. After each comment write a line of code that implements the instruction.
#Note that this code uses scientific notation to define large numbers. 4.445e8 is equal to 4.445 * 10 ** 8 which is equal to 444500000.0.
# Write your function here. Make sure to use "population_density" as the name of the fucntion. so, the test below works.
def population_density(val1, val2):
    """Return the population density: population (val1) per unit area (val2)."""
    return val1 / val2
# Test cases for your function. Don't change anything below this comment.
test1 = population_density(10, 1)
expected_result1 = 10
print("expected result: {}, actual result: {}".format(expected_result1, test1))

test2 = population_density(864816, 121.4)
expected_result2 = 7123.6902801
print("expected result: {}, actual result: {}".format(expected_result2, test2))
| [
"geofferyjoseph1@gmail.com"
] | geofferyjoseph1@gmail.com |
45f625839b142e095671acdb09a4ea53a6a605a6 | 92a0977e694e49ca70adbcaaa0fd6a66576f85e6 | /blog/migrations/0001_initial.py | d46ac7e7b66d02b5a4f68a2609c3a3a894175958 | [] | no_license | Melody1992/my-first-blog | 9ca6cbf8f47257b1d7d12af98d8797cb3d3f2972 | 40e44bcc48883626e6ecc34417ded1aa7de12d08 | refs/heads/master | 2021-01-20T12:16:45.719637 | 2017-08-29T10:00:44 | 2017-08-29T10:00:44 | 101,709,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-29 09:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Initial migration for the blog app: creates the Post model with an
    # author FK to the project's (swappable) user model. Do not edit a
    # migration that has already been applied.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"you@example.com"
] | you@example.com |
195d63b02681ad0d2d5fb06c1b8351574c2a7ff4 | 59f8e783abe9949cf9e9aef5936d2349f7df7414 | /methyl/ma_analysis/epigenotyping-old/decodingpath.py | 026184ea4b5bc9b8fb3a55161e1cddb4b2520f1d | [] | no_license | bhofmei/analysis-scripts | c4d8eafde2834b542c71c305e66c4e6f8a6e2c57 | 189bf355f0f878c5603b09a06b3b50b61a11ad93 | refs/heads/master | 2021-01-17T17:26:30.799097 | 2019-10-27T12:49:10 | 2019-10-27T12:49:10 | 56,076,808 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,397 | py | ### Decoding types ###
import pandas as pd
import numpy as np
import math
class DecodePath:
    ''' Base class for decoding types.

    Holds per-bin emission scores for the three epigenotype states
    ('mother', 'MPV', 'father') and the state-transition matrix.
    '''
    def __init__( self, df, transMat ):
        # n bins, m states
        self.labels = np.array( ['mother', 'MPV', 'father'] )
        self.data = df # n x m
        self.transitions = transMat # fraction probs not log, m x m
        self.emissions = self._getEmissions( df ) # m x n
        self.states, self.size = self.emissions.shape

    def _getEmissions( self, data ):
        # Reshape the long-format frame into an m x n float array:
        # one row per state label, one column per bin.
        idVars = ['sample','bin','prediction']
        # `list(data)` is the column names; num.feat is optional.
        if 'num.feat' in list(data):
            idVars += ['num.feat']
        dfm = pd.melt( data, id_vars=idVars)
        dfp = dfm.pivot( index='variable', columns='bin', values='value' )
        # Fix the row order to match self.labels.
        dfe = dfp.reindex( self.labels )
        return dfe.values.astype(np.float64)
class DecodeViterbi( DecodePath ):
    ''' Viterbi decoding: most probable single state path. '''

    def run( self ):
        ''' main function accessible by outside classes '''
        self._initializeV()
        self._fillV()
        self._pathV()
        return self.data

    def _initializeV( self ):
        # take log of transitions
        self.log_transitions = np.log( self.transitions ) # m x m
        # initialize empty data structures for dynamic programming
        self.probabilities = np.zeros( (self.size, self.states) ) # n x m
        self.traceback = np.zeros( (self.size, self.states), dtype=np.int8 ) # n x m

    def _fillV( self ):
        # loop through rows/bins
        for i in range(self.size):
            # loop through states
            for j in range(self.states):
                em = self.emissions[j,i] # note: m x n
                maxS, maxP = self._computeScore( i, j, em )
                self.probabilities[i,j] = maxS
                self.traceback[i,j] = maxP

    def _computeScore( self, i, j, prob ):
        # Score of reaching state j at bin i from each previous state k.
        # BUG FIX: the score vector was hard-coded to length 3
        # ([prob]*3); use self.states so the class also works if the
        # label set ever changes size.
        scores = np.array( [prob] * self.states ) # 1 x m
        for k in range(self.states):
            if i != 0:
                scores[k] += self.probabilities[i-1,k]
            scores[k] += self.log_transitions[k,j]
        maxS = scores.max()
        # First bin has no predecessor: mark traceback with -1.
        maxP = (-1 if i == 0 else scores.argmax() )
        return maxS, maxP

    def _pathV( self ):
        # add score columns to output
        self.data['vit.score.mother'] = self.probabilities[:,0]
        self.data['vit.score.MPV'] = self.probabilities[:,1]
        self.data['vit.score.father'] = self.probabilities[:,2]
        self.data['vit.prediction'] = 'NA'
        vals = self.probabilities[self.size-1]
        # start traceback from the best final state
        nextJ = vals.argmax()
        for i in range( self.size-1, -1, -1):
            nextJ = self._tracebackHelper( i, nextJ )
            if nextJ == -1:
                break # finished traceback

    def _tracebackHelper( self, i, j ):
        # column number where the decoded prediction is recorded
        colI = np.nonzero(self.data.columns.values == 'vit.prediction')[0][0]
        # record the label of the current state
        label = self.labels[j]
        self.data.iloc[i, colI] = label
        # return next cell to travel to
        return self.traceback[i,j]
class DecodeForwardBackward( DecodePath ):
    ''' Forward-backward decoding: per-bin posterior state probabilities. '''

    def run( self ):
        ''' main function accessible by outside classes '''
        self._initializeF()
        self._fillF()
        self._pathF()
        return self.data

    def _initializeF( self ):
        # transform emissions from log to fractions
        self.prob_emissions = np.exp( self.emissions )
        # initialize forward/backward tables: column 0 of forward is a
        # uniform prior, last column of backward is all ones
        self.forward = np.zeros( (self.states, self.size+1) ) # m x n+1
        self.forward[:,0] = 1.0/self.states
        self.backward = np.zeros( (self.states, self.size+1) ) # m x n+1
        self.backward[:,-1] = 1.0
        # initialize posterior prob dist
        self.posterior = np.zeros( (self.size, self.states) ) # n x m

    def _fillF( self ):
        # Forward pass: propagate and renormalise each column (the
        # per-column normalisation keeps the recursion stable).
        for i in range(self.size):
            # get current column values
            fCol = np.matrix( self.forward[:,i] )
            # fill in next column
            self.forward[:,i+1] = fCol * np.matrix( self.transitions ) * np.matrix( np.diag( self.prob_emissions[:,i] ) )
            # normalize
            self.forward[:,i+1] = self.forward[:,i+1] / np.sum( self.forward[:,i+1] )
        # Backward pass, filled right to left.
        for i in range( self.size, 0, -1 ):
            # get current column values
            bRow = np.matrix( self.backward[:,i]).transpose()
            # get values for next column
            tmpCol = ( np.matrix(self.transitions) * np.matrix(np.diag(self.prob_emissions[:,i-1])) * bRow).transpose()
            # normalize
            self.backward[:,i-1] = tmpCol / np.sum( tmpCol )
        # Combine forward and backward into the posterior.
        # (The zeros allocation below is immediately overwritten.)
        tmpPosterior = np.zeros((self.states, self.size))
        tmpPosterior = np.array( self.forward[:,1:] ) * np.array( self.backward[:,:-1] )
        # normalize per bin
        tmpPosterior = tmpPosterior / np.sum( tmpPosterior, 0)
        self.posterior = np.transpose(tmpPosterior)

    def _pathF( self ):
        # add posterior score columns and the max-a-posteriori label
        self.data['fb.score.mother'] = self.posterior[:,0]
        self.data['fb.score.MPV'] = self.posterior[:,1]
        self.data['fb.score.father'] = self.posterior[:,2]
        maxI = self.posterior.argmax( axis=1 )
        self.data['fb.prediction'] = self.labels[maxI]
class DecodeAll( DecodeViterbi, DecodeForwardBackward ):
    ''' Run both Viterbi and forward-backward decoding on the same data. '''

    def run( self ):
        ''' main function accessible by outside classes '''
        # Viterbi pass first, then the forward-backward pass; both add
        # their own columns to self.data.
        for step in (self._initializeV, self._fillV, self._pathV,
                     self._initializeF, self._fillF, self._pathF):
            step()
        return self.data
| [
"bhofmei@gmail.com"
] | bhofmei@gmail.com |
1b371ce2d76c8b9c0dafca699c63800a51a7d093 | 4d4fcde3efaa334f7aa56beabd2aa26fbcc43650 | /server/src/uds/migrations/0037_service_token.py | 95647ca5e24fc6567d3090492d73e90580495ee2 | [] | no_license | xezpeleta/openuds | a8b11cb34eb0ef7bb2da80f67586a81b2de229ef | 840a7a02bd7c9894e8863a8a50874cdfdbf30fcd | refs/heads/master | 2023-08-21T17:55:48.914631 | 2021-10-06T10:39:06 | 2021-10-06T10:39:06 | 414,489,331 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # Generated by Django 3.0.3 on 2020-02-08 18:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional, unique `token` field to the Service model.
    # Do not edit a migration that has already been applied.

    dependencies = [
        ('uds', '0036_auto_20200131_1224'),
    ]

    operations = [
        migrations.AddField(
            model_name='service',
            name='token',
            field=models.CharField(blank=True, default=None, max_length=32, null=True, unique=True),
        ),
    ]
| [
"dkmaster@dkmon.com"
] | dkmaster@dkmon.com |
f2bdcb2ebe82957a5b18e994bc9df717a42e7ea2 | 99a4d88d2004bad1e9e79f92c33a9ab1eb5644c4 | /Solution/BOJ/11286 절댓값 힙.py | 94741ae799a6f6a4b3acd56a64aeef4332d76e6b | [] | no_license | ginger-kang/Problem-Solving | cb64a4f6a0275419fe7be67fb50a9eb48e4b5869 | 1fc074d39a47a416d990e6e3b95a6c9f62a838f7 | refs/heads/master | 2023-08-14T13:54:00.706663 | 2021-09-10T11:39:16 | 2021-09-10T11:39:16 | 255,123,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | import heapq
import sys
input = sys.stdin.readline
# BOJ 11286 "absolute-value heap": a non-zero input is pushed; an input of
# zero pops (and prints) the element with the smallest |value|, where ties
# are broken toward the negative value by the (abs, value) tuple ordering.
heap = []
for _ in range(int(input())):
    value = int(input())
    if value:
        heapq.heappush(heap, (abs(value), value))
    elif heap:
        print(heapq.heappop(heap)[1])
    else:
        # popping an empty heap prints 0 per the problem statement
        print(0)
| [
"kdhoon07@gmail.com"
] | kdhoon07@gmail.com |
4eda55367dffe239294e5f2e103ff3db01021f09 | 30fe7671b60825a909428a30e3793bdf16eaaf29 | /.metadata/.plugins/org.eclipse.core.resources/.history/18/e08b59a165fa00161174a93fd5908e78 | 4f3868130d32db869552206dcc0d483e87aa16e2 | [] | no_license | abigdream84/PythonStudy | 0fc7a3b6b4a03a293b850d0ed12d5472483c4fb1 | 059274d3ba6f34b62ff111cda3fb263bd6ca8bcb | refs/heads/master | 2021-01-13T04:42:04.306730 | 2017-03-03T14:54:16 | 2017-03-03T14:54:16 | 79,123,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,174 | #!/usr/bin/env python
#coding:UTF-8
from audit_demo.utility.MySqlHelper import MySqlHelper
class s_g_u_relation(object):
    """CRUD helper for the ``s_g_u_relation`` link table, which associates
    servers (``s_table``) with groups (``g_table``) and users (``u_table``).

    Error-handling contract (preserved from the original implementation):
    every lookup prints the exception and returns ``False`` on any database
    error; delete methods print a notice when there is nothing to delete.
    """

    def __init__(self):
        self.__helper = MySqlHelper()

    def __select_scalar(self, sql, params):
        """Run *sql* and return the first column of the first row,
        or ``False`` (after printing the error) when the query fails
        or returns no rows."""
        try:
            return self.__helper.select(sql, params)[0][0]
        except Exception as e:
            print(e)
            return False

    def __select_column(self, sql, params):
        """Run *sql* and return the first column of every row as a list,
        or ``False`` (after printing the error) when the query fails."""
        try:
            return [row[0] for row in self.__helper.select(sql, params)]
        except Exception as e:
            print(e)
            return False

    def get_uid(self, username):
        """Return the u_id for *username*, or False on error."""
        return self.__select_scalar('select u_id from u_table where u_name = %s', username)

    def get_gid(self, gpname):
        """Return the g_id for group *gpname*, or False on error."""
        return self.__select_scalar('select g_id from g_table where g_name = %s', gpname)

    def get_sid(self, serip):
        """Return the s_id for server IP *serip*, or False on error."""
        return self.__select_scalar('select s_id from s_table where s_ip = %s', serip)

    def add_s_g(self, serip, gpname):
        """Link server *serip* to group *gpname*; False on error."""
        sid = str(self.get_sid(serip))
        gid = str(self.get_gid(gpname))
        sql = 'insert into s_g_u_relation(f_s_id,f_g_id) values(%s , %s)'
        try:
            self.__helper.insert_one(sql, (sid, gid))
        except Exception as e:
            print(e)
            return False

    def add_s_u(self, serip, username):
        """Link server *serip* to user *username*; False on error."""
        sid = str(self.get_sid(serip))
        uid = str(self.get_uid(username))
        sql = 'insert into s_g_u_relation(f_s_id,f_u_id) values(%s , %s)'
        try:
            self.__helper.insert_one(sql, (sid, uid))
        except Exception as e:
            print(e)
            return False

    def get_s_u_g_id(self, serip):
        """Return all relation ids whose server matches *serip*."""
        sid = str(self.get_sid(serip))
        return self.__select_column(
            'select s_g_u_id from s_g_u_relation where f_s_id = %s', sid)

    def get_s_g_id(self, gpname):
        """Return all relation ids whose group matches *gpname*."""
        gid = str(self.get_gid(gpname))
        return self.__select_column(
            'select s_g_u_id from s_g_u_relation where f_g_id = %s', gid)

    def get_s_u_id(self, username):
        """Return all relation ids whose user matches *username*."""
        uid = str(self.get_uid(username))
        return self.__select_column(
            'select s_g_u_id from s_g_u_relation where f_u_id = %s', uid)

    def get_s_u_ser(self, username):
        """Return the server ids linked to user *username*."""
        uid = str(self.get_uid(username))
        return self.__select_column(
            'select f_s_id from s_g_u_relation where f_u_id = %s', uid)

    def __delete_ids(self, id_list, label):
        """Delete every relation row in *id_list*; print a notice (using
        *label* for the message) when the list is empty or falsy."""
        if not id_list:
            print('No relations of %s in s_g_u_relation table.' % label)
            return
        sql = 'delete from s_g_u_relation where s_g_u_id = %s'
        try:
            for relation_id in id_list:
                self.__helper.delete(sql, relation_id)
        except Exception as e:
            print(e)

    def del_s_g(self, gpname):
        """Delete every relation row that references group *gpname*."""
        # single fetch (the original queried the ids twice)
        self.__delete_ids(self.get_s_g_id(gpname), gpname)

    def del_s_u(self, username):
        """Delete every relation row that references user *username*."""
        self.__delete_ids(self.get_s_u_id(username), username)

    def del_s_g_u(self, serip):
        """Delete every relation row that references server *serip*."""
        self.__delete_ids(self.get_s_u_g_id(serip), serip)
'''
t = s_g_u_relation()
#t.add_s_g('192.168.0.1', 'gp2')
print(t.add_s_u('192.168.0.1', 'user2'))
'''
| [
"abigdream@hotmail.com"
] | abigdream@hotmail.com | |
47f6bad4ec08b075bab3d5983b0dee9335efe10b | ab11444273824fb46eac78d7f3dd532ae65e3bf3 | /doc/conf.py | 7d97517b8acb380b0f9be389abd16b9fa6517b2e | [
"MIT"
] | permissive | firasm/sphinx-comments | 30f2d262a723ca11e7e53f153506ee926d52e3b1 | 25db7a450af426fd898c4b1f8c656c786f37ca8c | refs/heads/master | 2022-12-01T21:40:05.283906 | 2020-08-10T17:00:26 | 2020-08-10T17:00:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,365 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "Sphinx Comments"
copyright = "2018, Chris Holdgraf"
author = "Chris Holdgraf"
# The short X.Y version
version = ""
# The full version, including alpha/beta/rc tags
release = ""
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx_comments", "myst_parser"]
comments_config = {
# "hypothesis": True,
# "utterances": {
# "repo": "executablebooks/sphinx-comments",
# },
# "dokieli": True
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# CopyButton configuration
copybutton_prompt_text = ">>> "
# Switches for testing but shouldn't be activated in the live docs
# copybutton_only_copy_prompt_lines = False
# copybutton_remove_prompts = False
# copybutton_image_path = "test/TEST_COPYBUTTON.png"
# copybutton_selector = "div"
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "SphinxCommentsdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"SphinxComments.tex",
"Sphinx Comments Documentation",
"Chris Holdgraf",
"manual",
),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "SphinxComments", "Sphinx Comments Documentation", [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"SphinxComments",
"Sphinx Comments Documentation",
author,
"SphinxComments",
"One line description of project.",
"Miscellaneous",
),
]
| [
"choldgraf@berkeley.edu"
] | choldgraf@berkeley.edu |
b654052281e54dfedd953387d9a22ef9cbba28f0 | a2b598d8e89c1755f683d6b6fe35c3f1ef3e2cf6 | /past_archive/swexpert/3499(perfectShuffle).py | d2712767eb99f3281392018895df8ae2b11d6d43 | [
"MIT"
] | permissive | DongHyunByun/algorithm_practice | cbe82606eaa7f372d9c0b54679bdae863aab0099 | dcd595e6962c86f90f29e1d68f3ccc9bc673d837 | refs/heads/master | 2022-09-24T22:47:01.556157 | 2022-09-11T07:36:42 | 2022-09-11T07:36:42 | 231,518,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | for t in range(int(input())):
N=int(input())
L=input().split()
print(f"#{t+1}",end="")
if N%2==0:
k=int(N/2)
for i in range(k):
print("",L[i],end="")
print("",L[i+k],end="")
print("")
else:
K=int(N/2)
for i in range(K):
print("",L[i],end="")
print("",L[i+K+1],end="")
print("",L[K]) | [
"ngoodsamari@naver.com"
] | ngoodsamari@naver.com |
6d5e2b8faed3400b9be9ec71c62c78e65f70c8c5 | 9b36652dafb58888b7a584806ee69a33fcb609d5 | /objutils/tests/testTek.py | d26b7c1df514a10fceac180155a4ecf70ca605da | [] | no_license | pySART/objutils | db33e4576cf68111cb4debbafec06a0204844938 | 5ba4631b2245caae80d4dbe0053db0f2706ba53f | refs/heads/master | 2020-06-29T03:35:24.485977 | 2016-11-21T14:21:56 | 2016-11-21T14:21:56 | 74,451,500 | 5 | 2 | null | 2016-11-22T08:36:10 | 2016-11-22T08:36:10 | null | UTF-8 | Python | false | false | 961 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from objutils import loads, dumps
from objutils.section import Section
from objutils.image import Image, Builder
import unittest
TEK = b"""/B000100C576F77212044696420796F7520726561A5
/B010100D6C6C7920676F207468726F7567682061C1
/B020100E6C6C20746861742074726F75626C6520AF
/B0300D1B746F207265616420746869733F8D
/B03D001B"""
S19 = b"""S113B000576F77212044696420796F7520726561D8
S113B0106C6C7920676F207468726F756768206143
S113B0206C6C20746861742074726F75626C652036
S110B030746F207265616420746869733F59
S5030004F8"""
class TestRoundtrip(unittest.TestCase):
    """Round-trip the same image between Tektronix hex and S-record formats."""

    def testLoadsWorks(self):
        """Tektronix input re-encodes to the expected S-record output."""
        data = loads("tek", TEK)
        self.assertEqual(dumps("srec", data, s5record = True), S19)

    def testDumpsWorks(self):
        """S-record input re-encodes to the expected Tektronix output."""
        data = loads("srec", S19)
        self.assertEqual(dumps("tek", data), TEK)
# Allow running this test module directly (python testTek.py).
if __name__ == '__main__':
    unittest.main()
| [
"cpu12.gems@googlemail.com"
] | cpu12.gems@googlemail.com |
10b2e78bdd20211096522dfd8c9647defebbde56 | 78cb6dadc7599e01b078682b175f21be673ed199 | /438. Find All Anagrams in a String.py | 6cc8986e066d54440059175d3e147ddeb642285b | [] | no_license | AlexWufan/leetcode-python | 5cf5f13dbc7d1e425fde646df618e50c488fa79f | 435323a9fcea6a4d09266785e88fb78735e0cc3e | refs/heads/master | 2021-01-13T00:49:49.870468 | 2018-04-13T18:44:19 | 2018-04-13T18:44:19 | 51,347,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | class Solution(object):
def findAnagrams(self, s, p):
"""
:type s: str
:type p: str
:rtype: List[int]
"""
output = []
d= {}
pdic = {}
if len(s) < len(p): return output
window = s[0:len(p)]
window = list(window)
for x in window:
d[x] = d.get(x, 0) + 1
for x in p:
pdic[x] = pdic.get(x, 0) + 1
if d == pdic: output.append(0)
for i in range(len(p),len(s)):
d[window[0]] -= 1
if d[window[0]] == 0:
del d[window[0]]
del window[0]
window.append(s[i])
d[window[-1]] = d.get(window[-1], 0) + 1
if d == pdic:
output.append(i-len(p)+1)
return output
if __name__=='__main__':
    # manual smoke run; expected output: [0, 6]
    asolution = Solution()
    print(asolution.findAnagrams("cbaebabacd", "abc"))
"mengnanszw@gmail.com"
] | mengnanszw@gmail.com |
6331faf2779685d448c3fd00e25cd3fe87609f67 | 2173909e5a0a87d72f86f2805e602c1d73e07568 | /w3af-repo/w3af/core/controllers/misc/number_generator.py | ed8d1c1c56f485f8ad42374fc8ff73a1121aadf2 | [] | no_license | ZenSecurity/w3af-module | 78d603ed076f879b8bd280c0bf3382d153aaacec | 13967bffaa211fe7f793204796802f1a5967f1d7 | refs/heads/master | 2021-01-15T13:48:24.183830 | 2016-08-05T13:09:49 | 2016-08-05T13:09:49 | 40,010,219 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,518 | py | """
number_generator.py
Copyright 2009 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from threading import Lock
class number_generator(object):
    """
    Thread-safe source of consecutive integer IDs.

    Used for numbering HTTP requests and responses; ``inc`` is the only
    operation that advances the counter, and ``reset`` rewinds it to zero.
    """

    def __init__(self):
        """
        Create the counter at zero together with the lock that guards
        every mutation.
        """
        self._lock = Lock()
        self._id = 0

    def inc(self):
        """
        :return: The next number.
        """
        with self._lock:
            next_value = self._id + 1
            self._id = next_value
        return next_value

    def get(self):
        """
        :return: The current number
        """
        return self._id

    def reset(self):
        """
        Reset internal counter to 0.
        """
        with self._lock:
            self._id = 0
# Module-level shared instance: one ID sequence for the whole process.
consecutive_number_generator = number_generator()
| [
"andres.riancho@gmail.com"
] | andres.riancho@gmail.com |
33e9c5a127191634c357502c02ba4bb43a209411 | e4616ae545872442f24b35e46d76d351edab22b0 | /test/sql/test_select.py | 4c00cb53c790e5d8c3c31a9b4e420e04e1325ebc | [
"MIT"
] | permissive | StefanosChaliasos/sqlalchemy | 0915d5fb66420eaf5dbb3468ed4a2c283f8802c0 | 8c228be322023041b11691d93dafa1be090f01a0 | refs/heads/master | 2022-12-05T02:43:43.684766 | 2020-08-26T13:12:24 | 2020-08-26T13:12:24 | 290,499,121 | 0 | 1 | MIT | 2020-08-26T13:08:17 | 2020-08-26T13:08:17 | null | UTF-8 | Python | false | false | 7,070 | py | from sqlalchemy import Column
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import tuple_
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import fixtures
# Lightweight table()/column() constructs (no MetaData) used for pure
# SQL-string compilation assertions.
table1 = table(
    "mytable",
    column("myid", Integer),
    column("name", String),
    column("description", String),
)
table2 = table(
    "myothertable", column("otherid", Integer), column("othername", String)
)

# Full Table objects with a real ForeignKey so the join tests can derive
# an implicit ON clause (parent.id = child.parent_id).
metadata = MetaData()
parent = Table(
    "parent",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("data", String(50)),
)
child = Table(
    "child",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("parent_id", ForeignKey("parent.id")),
    Column("data", String(50)),
)
class FutureSelectTest(fixtures.TestBase, AssertsCompiledSQL):
    """String-compilation tests for the 2.0-style ``select()`` calling
    conventions: legacy keyword/column-list styles versus positional
    columns, and ``join()`` / ``join_from()`` with implicit or explicit
    left sides and ON clauses."""

    __dialect__ = "default"

    # -- calling-style coverage ------------------------------------------

    def test_legacy_calling_style_kw_only(self):
        """Legacy mode: select() invoked with only keyword arguments."""
        stmt = select(
            whereclause=table1.c.myid == table2.c.otherid
        ).add_columns(table1.c.myid)

        self.assert_compile(
            stmt,
            "SELECT mytable.myid FROM mytable, myothertable "
            "WHERE mytable.myid = myothertable.otherid",
        )

    def test_legacy_calling_style_col_seq_only(self):
        """Legacy mode: columns passed as a single list argument."""
        stmt = select([table1.c.myid]).where(table1.c.myid == table2.c.otherid)

        self.assert_compile(
            stmt,
            "SELECT mytable.myid FROM mytable, myothertable "
            "WHERE mytable.myid = myothertable.otherid",
        )

    def test_new_calling_style(self):
        """2.0 style: columns passed positionally."""
        stmt = select(table1.c.myid).where(table1.c.myid == table2.c.otherid)

        self.assert_compile(
            stmt,
            "SELECT mytable.myid FROM mytable, myothertable "
            "WHERE mytable.myid = myothertable.otherid",
        )

    def test_kw_triggers_old_style(self):
        """Mixing positional columns with legacy keywords must raise."""
        assert_raises_message(
            exc.ArgumentError,
            r"select\(\) construct created in legacy mode, "
            "i.e. with keyword arguments",
            select,
            table1.c.myid,
            whereclause=table1.c.myid == table2.c.otherid,
        )

    # -- join() / join_from() with no prior select_from() ----------------

    def test_join_nofrom_implicit_left_side_explicit_onclause(self):
        stmt = select(table1).join(table2, table1.c.myid == table2.c.otherid)

        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable JOIN myothertable "
            "ON mytable.myid = myothertable.otherid",
        )

    def test_join_nofrom_explicit_left_side_explicit_onclause(self):
        stmt = select(table1).join_from(
            table1, table2, table1.c.myid == table2.c.otherid
        )

        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable JOIN myothertable "
            "ON mytable.myid = myothertable.otherid",
        )

    def test_join_nofrom_implicit_left_side_implicit_onclause(self):
        # ON clause derived from the parent/child ForeignKey
        stmt = select(parent).join(child)

        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent JOIN child "
            "ON parent.id = child.parent_id",
        )

    def test_join_nofrom_explicit_left_side_implicit_onclause(self):
        stmt = select(parent).join_from(parent, child)

        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent JOIN child "
            "ON parent.id = child.parent_id",
        )

    # -- join() / join_from() combined with explicit select_from() -------

    def test_join_froms_implicit_left_side_explicit_onclause(self):
        stmt = (
            select(table1)
            .select_from(table1)
            .join(table2, table1.c.myid == table2.c.otherid)
        )

        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable JOIN myothertable "
            "ON mytable.myid = myothertable.otherid",
        )

    def test_join_froms_explicit_left_side_explicit_onclause(self):
        stmt = (
            select(table1)
            .select_from(table1)
            .join_from(table1, table2, table1.c.myid == table2.c.otherid)
        )

        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable JOIN myothertable "
            "ON mytable.myid = myothertable.otherid",
        )

    def test_join_froms_implicit_left_side_implicit_onclause(self):
        stmt = select(parent).select_from(parent).join(child)

        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent JOIN child "
            "ON parent.id = child.parent_id",
        )

    def test_join_froms_explicit_left_side_implicit_onclause(self):
        stmt = select(parent).select_from(parent).join_from(parent, child)

        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent JOIN child "
            "ON parent.id = child.parent_id",
        )

    # -- filter_by() and tuple_() behavior -------------------------------

    def test_joins_w_filter_by(self):
        """filter_by() resolves against the most recent join target."""
        stmt = (
            select(parent)
            .filter_by(data="p1")
            .join(child)
            .filter_by(data="c1")
            .join_from(table1, table2, table1.c.myid == table2.c.otherid)
            .filter_by(otherid=5)
        )

        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent JOIN child "
            "ON parent.id = child.parent_id, mytable JOIN myothertable "
            "ON mytable.myid = myothertable.otherid "
            "WHERE parent.data = :data_1 AND child.data = :data_2 "
            "AND myothertable.otherid = :otherid_1",
            checkparams={"data_1": "p1", "data_2": "c1", "otherid_1": 5},
        )

    def test_filter_by_no_property(self):
        assert_raises_message(
            exc.InvalidRequestError,
            'Entity namespace for "mytable" has no property "foo"',
            select(table1).filter_by,
            foo="bar",
        )

    def test_select_tuple_outer(self):
        """SELECTing a bare tuple_() at the top level is rejected."""
        stmt = select(tuple_(table1.c.myid, table1.c.name))

        assert_raises_message(
            exc.CompileError,
            r"Most backends don't support SELECTing from a tuple\(\) object. "
            "If this is an ORM query, consider using the Bundle object.",
            stmt.compile,
        )

    def test_select_tuple_subquery(self):
        """A tuple_() inside a subquery is rendered when not fetched."""
        subq = select(
            table1.c.name, tuple_(table1.c.myid, table1.c.name)
        ).subquery()

        stmt = select(subq.c.name)

        # if we aren't fetching it, then render it
        self.assert_compile(
            stmt,
            "SELECT anon_1.name FROM (SELECT mytable.name AS name, "
            "(mytable.myid, mytable.name) AS anon_2 FROM mytable) AS anon_1",
        )
| [
"mike_mp@zzzcomputing.com"
] | mike_mp@zzzcomputing.com |
9509c638c0f00031012f7d8b3195c967ca88f329 | 2a2ce1246252ef6f59e84dfea3888c5a98503eb8 | /examples/introduction.to.programming.with.turtle/for_all/3-4-4.flower.py | 917b9cc24fd5d2ca8f82570037dad0dd7ee82e1b | [
"BSD-3-Clause"
] | permissive | royqh1979/PyEasyGraphics | c7f57c1fb5a829287e9c462418998dcc0463a772 | 842121e461be3273f845866cf1aa40c312112af3 | refs/heads/master | 2021-06-11T10:34:03.001842 | 2021-04-04T10:47:52 | 2021-04-04T10:47:52 | 161,438,503 | 8 | 4 | BSD-3-Clause | 2021-04-04T10:47:53 | 2018-12-12T05:43:31 | Python | UTF-8 | Python | false | false | 337 | py | from easygraphics.turtle import *
def main():
    """Draw a six-petal turtle-graphics flower.

    The original repeated the same 60-step arc loop twice per petal;
    the inner ``range(2)`` loop removes that duplication while tracing
    exactly the same pen path.
    """
    create_world(800, 600)
    set_speed(400)
    for _ in range(6):
        # each petal: two 60-degree arcs (60 x (fd 3, rt 1)), each arc
        # followed by a 120-degree turn
        for _ in range(2):
            for _ in range(60):
                fd(3)
                rt(1)
            rt(120)
        # rotate to the start of the next petal
        rt(60)
    pause()
    close_world()
# hand control to easygraphics' event loop, which invokes main()
easy_run(main)
"royqh1979@gmail.com"
] | royqh1979@gmail.com |
4f676bf12515b70baea4496e48da040e19db6938 | 0dca74ba205f42b38c1d1a474350e57ff78352b4 | /Geometry/HGCalGeometry/test/python/testHGCalNeighbor_cfg.py | 2fa941f4241557585e83e0bb0e699de4e83df2f8 | [
"Apache-2.0"
] | permissive | jaimeleonh/cmssw | 7fd567997a244934d6c78e9087cb2843330ebe09 | b26fdc373052d67c64a1b5635399ec14525f66e8 | refs/heads/AM_106X_dev | 2023-04-06T14:42:57.263616 | 2019-08-09T09:08:29 | 2019-08-09T09:08:29 | 181,003,620 | 1 | 0 | Apache-2.0 | 2019-04-12T12:28:16 | 2019-04-12T12:28:15 | null | UTF-8 | Python | false | false | 1,732 | py | import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process("PROD",eras.Phase2C4)
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.load("Configuration.Geometry.GeometryExtended2023D28Reco_cff")
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('TrackingTools.TrackAssociator.DetIdAssociatorESProducer_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Geometry.HGCalGeometry.hgcalTestNeighbor_cfi')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag = autoCond['phase2_realistic']
if hasattr(process,'MessageLogger'):
process.MessageLogger.categories.append('HGCalGeom')
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.source = cms.Source("EmptySource")
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(14),
MinEta = cms.double(-3.5),
MaxEta = cms.double(3.5),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinE = cms.double(9.99),
MaxE = cms.double(10.01)
),
AddAntiParticle = cms.bool(False),
Verbosity = cms.untracked.int32(0),
firstRun = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
#process.p1 = cms.Path(process.generator*process.hgcalEETestNeighbor)
process.p1 = cms.Path(process.generator*process.hgcalEETestNeighbor*process.hgcalHEFTestNeighbor*process.hgcalHEBTestNeighbor)
| [
"sunanda.banerjee@cern.ch"
] | sunanda.banerjee@cern.ch |
11ae87bb61dbbdaf88257bb33a9cffad4e4b2702 | a38180435ac5786185c0aa48891c0aed0ab9d72b | /S4/S4 Decompiler/decompyle3/parsers/reducecheck/not_or_check.py | 91f0417f69f9a07e915508dd197ca8319a38c9a4 | [
"CC-BY-4.0"
] | permissive | NeonOcean/Environment | e190b6b09dd5dbecba0a38c497c01f84c6f9dc7d | ca658cf66e8fd6866c22a4a0136d415705b36d26 | refs/heads/master | 2022-12-03T13:17:00.100440 | 2021-01-09T23:26:55 | 2021-01-09T23:26:55 | 178,096,522 | 1 | 1 | CC-BY-4.0 | 2022-11-22T20:24:59 | 2019-03-28T00:38:17 | Python | UTF-8 | Python | false | false | 1,598 | py | # Copyright (c) 2020 Rocky Bernstein
def not_or_check(
    self, lhs: str, n: int, rule, ast, tokens: list, first: int, last: int
) -> bool:
    """Grammar reduce-rule check: return True to *reject* a candidate
    "not ... or ..." reduction when the token span is really part of an
    "and" expression.
    """
    # Note (exp1 and exp2) and (not exp1 or exp2) are close, especially in
    # an control structure like an "if".
    # "exp1 and exp2":
    #    exp1; POP_JUMP_IF_FALSE endif; exp2; POP_JUMP_IF_FALSE endif; then
    #
    # "not exp1 or exp2":
    #    exp1; POP_JUMP_IF_FALSE then; exp2 POP_JUMP_IF_FALSE endif; then
    # The difference is whether the POP_JUMPs go to the same place or not.
    expr_pjif = ast[0]
    end_token = tokens[last-1]
    if end_token.kind.startswith("POP_JUMP_IF_FALSE"):
        # drill down through nested and_parts to the innermost expr_pjif
        while expr_pjif == "and_parts":
            expr_pjif = expr_pjif[0]
            pass
        assert expr_pjif == "expr_pjif"
        if expr_pjif[-1].attr != end_token.attr:
            return True

        # More "and" in a condition vs. "not or":
        # Intuitively it has to do with where we go with the "and" or
        # "not or". Right now if there are loop jumps involved
        # we are saying this is "and", but this empirical and not on
        # solid ground.

        # If test jump is a backwards then, we have an "and", not a "not or".
        first_offset = tokens[first].off2int()
        if end_token.attr < first_offset:
            return True

        # Similarly if the test jump goes to another jump it is (probably?) an "and".
        jump_target_inst_index = self.offset2inst_index[end_token.attr]
        inst = self.insts[jump_target_inst_index-1]
        return inst.is_jump()
        pass
    return False
| [
"40919586+NeonOcean@users.noreply.github.com"
] | 40919586+NeonOcean@users.noreply.github.com |
0350c795fa887e71cffc61e9518bb61ec12bd3d0 | 4b41a76c5c366ba2daa30843acea16609b8f5da7 | /2017/21/AoC17_21_1.py | 74d5bfb689ee820e1ec5834706e68b017c97dce6 | [] | no_license | grandfoosier/AdventOfCode | c4706cfefef61e80060cca89b0433636e42bf974 | a43fdd72fe4279196252f24a4894500a4e272a5d | refs/heads/master | 2020-06-11T12:36:48.699811 | 2019-01-14T23:44:44 | 2019-01-14T23:44:44 | 75,665,958 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,995 | py | class Art(object):
def __init__(self):
fname = "AoC17_21_1.txt"
self.rules = [line.rstrip("\n").split() for line in open(fname)]
self.lkup = [i[0] for i in self.rules]
self.grid = ['.#.','..#','###']
def _twfh(self, s):
return (s[1] + s[0] + '/' + # 01 -> 10
s[4] + s[3]) # 34 -> 43
def _twfv(self, s):
return (s[3] + s[4] + '/' + # 01 -> 34
s[0] + s[1]) # 34 -> 01
def _twr1(self, s):
return (s[3] + s[0] + '/' + # 01 -> 30
s[4] + s[1]) # 34 -> 41
def _twr2(self, s):
return (s[4] + s[3] + '/' + # 01 -> 43
s[1] + s[0]) # 34 -> 10
def _twr3(self, s):
return (s[1] + s[4] + '/' + # 01 -> 14
s[0] + s[3]) # 34 -> 03
def _twf1(self, s):
return (s[4] + s[1] + '/' + # 01 -> 41
s[3] + s[0]) # 34 -> 30
def _twf3(self, s):
return (s[0] + s[3] + '/' + # 01 -> 03
s[1] + s[4]) # 34 -> 14
def _thfh(self, s):
return (s[2] + s[1] + s[0] + '/' + # 012 210
s[6] + s[5] + s[4] + '/' + # 456 -> 654
s[10] + s[9] + s[8]) # 89A A98
def _thfv(self, s):
return (s[8] + s[9] + s[10] + '/' + # 012 89A
s[4] + s[5] + s[6] + '/' + # 456 -> 456
s[0] + s[1] + s[2]) # 89A 012
def _thr1(self, s):
return (s[8] + s[4] + s[0] + '/' + # 012 840
s[9] + s[5] + s[1] + '/' + # 456 -> 951
s[10] + s[6] + s[2]) # 89A A62
def _thr2(self, s):
return (s[10] + s[9] + s[8] + '/' + # 012 A98
s[6] + s[5] + s[4] + '/' + # 456 -> 654
s[2] + s[1] + s[0]) # 89A 210
def _thr3(self, s):
return (s[2] + s[6] + s[10] + '/' + # 012 26A
s[1] + s[5] + s[9] + '/' + # 456 -> 159
s[0] + s[4] + s[8]) # 89A 048
def _thf1(self, s):
return (s[10] + s[6] + s[2] + '/' + # 012 A62
s[9] + s[5] + s[1] + '/' + # 456 -> 951
s[8] + s[4] + s[0]) # 89A 840
def _thf3(self, s):
return (s[0] + s[4] + s[8] + '/' + # 012 048
s[1] + s[5] + s[9] + '/' + # 456 -> 159
s[2] + s[6] + s[10]) # 89A 26A
def _tw2th(self):
fmd = []
for i in range(len(self.grid)/2):
fmd.append([])
for j in range(len(self.grid)/2):
fmd[i].append(self.grid[2*i][2*j:2*j+2] + '/' +
self.grid[2*i+1][2*j:2*j+2])
new = []
for i in range(len(fmd)):
new.append([])
for j in fmd[i]:
if j in self.lkup:
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twfh(j) in self.lkup:
j = self._twfh(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twfv(j) in self.lkup:
j = self._twfv(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twr1(j) in self.lkup:
j = self._twr1(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twr2(j) in self.lkup:
j = self._twr2(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twr3(j) in self.lkup:
j = self._twr3(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twf1(j) in self.lkup:
j = self._twf1(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twf3(j) in self.lkup:
j = self._twf3(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
else:
pause = raw_input("OOPS")
self.grid = []
for i in range(len(new)):
self.grid.extend(['','',''])
for j in range(len(new)):
self.grid[3*i+0] += new[i][j][0:3]
self.grid[3*i+1] += new[i][j][4:7]
self.grid[3*i+2] += new[i][j][8:11]
print ""
for i in self.grid: print i
def _th2fo(self):
fmd = []
for i in range(len(self.grid)/3):
fmd.append([])
for j in range(len(self.grid)/3):
fmd[i].append(self.grid[3*i][3*j:3*j+3] + '/' +
self.grid[3*i+1][3*j:3*j+3] + '/' +
self.grid[3*i+2][3*j:3*j+3])
new = []
for i in range(len(fmd)):
new.append([])
for j in fmd[i]:
if j in self.lkup:
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thfh(j) in self.lkup:
j = self._thfh(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thfv(j) in self.lkup:
j = self._thfv(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thr1(j) in self.lkup:
j = self._thr1(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thr2(j) in self.lkup:
j = self._thr2(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thr3(j) in self.lkup:
j = self._thr3(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thf1(j) in self.lkup:
j = self._thf1(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thf3(j) in self.lkup:
j = self._thf3(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
else:
pause = raw_input("OOPS")
self.grid = []
for i in range(len(new)):
self.grid.extend(['','','',''])
for j in range(len(new)):
self.grid[4*i+0] += new[i][j][0:4]
self.grid[4*i+1] += new[i][j][5:9]
self.grid[4*i+2] += new[i][j][10:14]
self.grid[4*i+3] += new[i][j][15:19]
print ""
for i in self.grid: print i
def increment(self, n):
for i in self.grid: print i
print ""
for i in range(n):
if len(self.grid) % 2 == 0: self._tw2th()
else: self._th2fo()
pause = raw_input("")
def count_on(self):
c = 0
for i in self.grid: c += i.count('#')
return c
A = Art()
print ""
A.increment(5)
print A.count_on()
print "\n"
| [
"noreply@github.com"
] | grandfoosier.noreply@github.com |
fad51eb5f3e5f98fc3c8c6f2df4c0bf604c80a66 | e0ef688e339e6f4a68382d821d159185e4297628 | /rhodopsin/experiment_base.py | bcbc827fdccc8fee442bc7d768722f86188783e1 | [
"MIT"
] | permissive | djpetti/rhodopsin | bc11befcc5e90e29705d74ab59e1405586df998b | 97bdb9a6ba3c29b1fe1dd1e60b0b41e5a247ccf1 | refs/heads/master | 2021-07-01T08:00:21.414567 | 2019-05-18T21:39:36 | 2019-05-18T21:39:36 | 147,679,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,528 | py | import abc
import os
import signal
from . import menu
from . import params
class ExperimentBase(abc.ABC):
""" Base class for experiments that defines the API. """
def __init__(self, save_file="experiment.rhp", hyperparams=None,
status=None):
"""
Args:
save_file: File in which to save the model data.
hyperparams: Optional custom hyperparameters to use.
status: Optional custom status parameters to use. """
self.__save_file = save_file
# Create hyperparameters.
self.__params = hyperparams
if self.__params is None:
self.__params = params.HyperParams()
# Create status parameters.
self.__status = status
if self.__status is None:
self.__status = params.Status()
# Add default status parameters.
self.__status.add_if_not_set("iterations", 0)
# Register the signal handler.
signal.signal(signal.SIGINT, self._handle_signal)
# Create the menu tree.
self.__menus = menu.MenuTree()
main_menu = menu.MainMenu(self.__params, self.__status)
adjust_menu = menu.AdjustMenu(self.__params, self.__status)
status_menu = menu.StatusMenu(self.__params, self.__status)
self.__menus.add_menu(main_menu)
self.__menus.add_menu(adjust_menu)
self.__menus.add_menu(status_menu)
# Run custom initialization code.
self._init_experiment()
# Check for an existing model.
if self._model_exists(self.__save_file):
load_menu = menu.LoadModelMenu(self.__params, self.__status,
self.__save_file)
load_menu.show()
# Check what was selected.
if load_menu.should_load():
# Load the model.
self._load_model(self.__save_file)
@abc.abstractmethod
def _handle_signal(self, signum, frame):
""" Handles the user hitting Ctrl+C. This is supposed to bring up the
menu.
Args:
signum: The signal number that triggered this.
frame: Current stack frame. """
pass
def _show_main_menu(self):
""" Show the main menu. """
self.__menus.show("main")
def _checkpoint(self):
"""
Saves the model at this point.
"""
self._save_model(self.__save_file)
def _init_experiment(self):
""" Runs any custom initialization code for the experiment. This will be
run right after we've configured parameters and hyperparameters, and
before we've attempted to load the model. By default, it does nothing.
"""
pass
@abc.abstractmethod
def _run_training_step(self):
""" Runs a single training iteration. This is meant to be overidden by a
subclass. """
pass
@abc.abstractmethod
def _run_testing_step(self):
""" Runs a single testing iteration. This is meant to be overidden by a
subclass. """
pass
def _save_model(self, save_file):
""" Saves the model. By default, it does nothing. It should be
implemented by a subclass.
Args:
save_file: The path at which to save the model. """
pass
def _load_model(self, save_file):
""" Loads a model from disk. If _save_model() is used, this must be
implemented by a subclass. Note that this is not an abstract method,
because if save_model is not used, it need not be implemented either.
Args:
save_file: The path from which to load the model. """
raise NotImplementedError(
"_load_model() must be implemented by subclass.")
@classmethod
def _model_exists(cls, save_file):
""" Checks if a saved model exists. By default, it just checks if
save_path exists, but it can be overridden to allow for more
sophisticated functionality.
Args:
save_file: The possible path to the saved model. """
return os.path.exists(save_file)
@abc.abstractmethod
def train(self):
""" Runs the training procedure to completion. """
pass
def get_params(self):
"""
Returns:
The hyperparameters being used for this experiment. """
return self.__params
def get_status(self):
"""
Returns:
The status parameters being used for this experiment. """
return self.__status
| [
"djpetti@gmail.com"
] | djpetti@gmail.com |
507c7a2c804bb7e49d3d43b11b73884c2d80ed71 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/network/v20180401/traffic_manager_user_metrics_key.py | 6a8b7be397ddbe4a0f2f1fb0d12eca8f2972edca | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 5,274 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['TrafficManagerUserMetricsKeyArgs', 'TrafficManagerUserMetricsKey']
@pulumi.input_type
class TrafficManagerUserMetricsKeyArgs:
def __init__(__self__):
"""
The set of arguments for constructing a TrafficManagerUserMetricsKey resource.
"""
pass
class TrafficManagerUserMetricsKey(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
__props__=None):
"""
Class representing Traffic Manager User Metrics.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[TrafficManagerUserMetricsKeyArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Class representing Traffic Manager User Metrics.
:param str resource_name: The name of the resource.
:param TrafficManagerUserMetricsKeyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TrafficManagerUserMetricsKeyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TrafficManagerUserMetricsKeyArgs.__new__(TrafficManagerUserMetricsKeyArgs)
__props__.__dict__["key"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20180401:TrafficManagerUserMetricsKey"), pulumi.Alias(type_="azure-native:network:TrafficManagerUserMetricsKey"), pulumi.Alias(type_="azure-nextgen:network:TrafficManagerUserMetricsKey"), pulumi.Alias(type_="azure-native:network/v20180801:TrafficManagerUserMetricsKey"), pulumi.Alias(type_="azure-nextgen:network/v20180801:TrafficManagerUserMetricsKey")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(TrafficManagerUserMetricsKey, __self__).__init__(
'azure-native:network/v20180401:TrafficManagerUserMetricsKey',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'TrafficManagerUserMetricsKey':
"""
Get an existing TrafficManagerUserMetricsKey resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = TrafficManagerUserMetricsKeyArgs.__new__(TrafficManagerUserMetricsKeyArgs)
__props__.__dict__["key"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
return TrafficManagerUserMetricsKey(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def key(self) -> pulumi.Output[Optional[str]]:
"""
The key returned by the User Metrics operation.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
The type of the resource. Ex- Microsoft.Network/trafficManagerProfiles.
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | morrell.noreply@github.com |
ae4858ddf9b7ff0bb77dffc2a48b39cb7643782b | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/domain/KoubeiQualityTestCloudacptItemQueryModel.py | bbc02ad96b03b678a8fa106974b1af53977ddacb | [
"Apache-2.0"
] | permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 2,209 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiQualityTestCloudacptItemQueryModel(object):
def __init__(self):
self._activity_id = None
self._batch_id = None
self._pid = None
self._uid = None
@property
def activity_id(self):
return self._activity_id
@activity_id.setter
def activity_id(self, value):
self._activity_id = value
@property
def batch_id(self):
return self._batch_id
@batch_id.setter
def batch_id(self, value):
self._batch_id = value
@property
def pid(self):
return self._pid
@pid.setter
def pid(self, value):
self._pid = value
@property
def uid(self):
return self._uid
@uid.setter
def uid(self, value):
self._uid = value
def to_alipay_dict(self):
params = dict()
if self.activity_id:
if hasattr(self.activity_id, 'to_alipay_dict'):
params['activity_id'] = self.activity_id.to_alipay_dict()
else:
params['activity_id'] = self.activity_id
if self.batch_id:
if hasattr(self.batch_id, 'to_alipay_dict'):
params['batch_id'] = self.batch_id.to_alipay_dict()
else:
params['batch_id'] = self.batch_id
if self.pid:
if hasattr(self.pid, 'to_alipay_dict'):
params['pid'] = self.pid.to_alipay_dict()
else:
params['pid'] = self.pid
if self.uid:
if hasattr(self.uid, 'to_alipay_dict'):
params['uid'] = self.uid.to_alipay_dict()
else:
params['uid'] = self.uid
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiQualityTestCloudacptItemQueryModel()
if 'activity_id' in d:
o.activity_id = d['activity_id']
if 'batch_id' in d:
o.batch_id = d['batch_id']
if 'pid' in d:
o.pid = d['pid']
if 'uid' in d:
o.uid = d['uid']
return o
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
2aa1abfe9c766dd05c3b82d22bb3d5e20a3d7ec2 | 039c2e60b859d88bb686c0e66bc6dab2ab723b8e | /apps/door_limits/migrations/0003_auto_20191024_1554.py | ec0156b7c63c8b53f585a6b765b18fe2dccfcca7 | [] | no_license | ccc-0/ECS | 850613971e4c6fd9cbb6ddcbe2c51b5285d622ac | ef4d69cb4c6fd1b1bbd40ba9c754c8e50c56d8ee | refs/heads/master | 2020-09-13T21:50:42.033517 | 2020-02-13T03:47:10 | 2020-02-13T03:47:10 | 222,913,137 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # Generated by Django 2.2.6 on 2019-10-24 15:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('door_limits', '0002_door_approval_user_id'),
]
operations = [
migrations.AlterField(
model_name='door_approval',
name='door_audittime',
field=models.DateTimeField(null=True, verbose_name='审批时间'),
),
]
| [
"1056179315@qq.com"
] | 1056179315@qq.com |
99917f155da4e2422ceaad95199050b2dcce42ba | 2052a12f0ab7a827d6427b5533b6ae29847dcc3b | /auto_commit.py | 62ed14d9722982e710ac10eb7f581bae2986a7e1 | [
"MIT"
] | permissive | cleiveliu/leetcodecn | 7db7af4da18e62bd592afc9f81dfa4aab46adced | 618a4b63a9cd055f1782903e860e9a93dfd30fc9 | refs/heads/master | 2020-08-28T23:44:10.102451 | 2020-07-19T15:26:45 | 2020-07-19T15:26:45 | 217,856,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | """
auto commit git and push to master with a git path and a optional commit message.
usage:
put this file in your git project dir and run it or \
script.py [-p|-pathname] filename [-m|-message] message
"""
import sys
import os
class Args:
def __init__(
self, pathname=os.path.dirname(__file__), commit_message="auto commit"
):
self.pathname = pathname
self.commit_message = commit_message
def __repr__(self):
return "Args(pathname={}, commit_message={})".format(
self.pathname, self.commit_message
)
def _exit():
print(__doc__)
sys.exit(1)
def perse_args():
args = sys.argv[1:]
args = list(map(lambda x: x.lower(), args))
theArgs = Args()
index = 0
if index < len(args):
if args[index] in ("-p", "-pathname"):
if index + 1 < len(args):
theArgs.pathname = args[index + 1]
index += 2
else:
_exit()
if index < len(args):
if args[index] in ("-m", "-message", "--m"):
if index + 1 < len(args):
theArgs.commit_massage = args[index + 1]
index += 2
else:
_exit()
else:
_exit()
if index < len(args):
_exit()
return theArgs
def execute(args: Args):
os.chdir(args.pathname)
os.system("git add .")
os.system('git commit -m "{}"'.format(args.commit_massage))
os.system("git push")
if __name__ == "__main__":
args = perse_args()
print(f"args:\n{args}")
execute(args)
| [
"cleiveliu1@gmail.com"
] | cleiveliu1@gmail.com |
7c7528acff2e58560608a00d39c207443eb6d648 | be37e5a350ef9cd4e1742c321cde206434593c27 | /test/integration/query_block.py | ecbfe177a112065b729795a188b1eb0c09390e2d | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | points-org/fabric-sdk-py | 9b8e763dc9e3e8e3710fb35fedd2e44ddc682126 | d689032772873027938599b8e54a676bf083be1f | refs/heads/master | 2021-07-01T15:13:11.688343 | 2020-09-08T11:28:05 | 2020-09-14T03:53:33 | 162,869,379 | 0 | 0 | Apache-2.0 | 2019-08-14T06:12:00 | 2018-12-23T06:58:31 | Python | UTF-8 | Python | false | false | 4,789 | py | # Copyright IBM ALL Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
import logging
from time import sleep
from hfc.fabric.peer import create_peer
from hfc.fabric.transaction.tx_context import create_tx_context
from hfc.fabric.transaction.tx_proposal_request import create_tx_prop_req, \
CC_INVOKE, CC_TYPE_GOLANG, CC_INSTANTIATE, CC_INSTALL, TXProposalRequest
from hfc.util.crypto.crypto import ecies
from hfc.util.utils import build_tx_req, send_transaction
from test.integration.utils import get_peer_org_user,\
BaseTestCase
from test.integration.config import E2E_CONFIG
from test.integration.e2e_utils import build_channel_request,\
build_join_channel_req
from queue import Queue
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
test_network = E2E_CONFIG['test-network']
CC_PATH = 'github.com/example_cc'
CC_NAME = 'example_cc'
CC_VERSION = '1.0'
class QueryBlockTest(BaseTestCase):
def invoke_chaincode(self):
self.channel = self.client.new_channel(self.channel_name)
org1 = "org1.example.com"
peer_config = test_network['org1.example.com']['peers']['peer0']
tls_cacerts = peer_config['tls_cacerts']
opts = (('grpc.ssl_target_name_override',
peer_config['server_hostname']),)
endpoint = peer_config['grpc_request_endpoint']
self.org1_peer = create_peer(endpoint=endpoint,
tls_cacerts=tls_cacerts,
opts=opts)
self.org1_admin = get_peer_org_user(org1,
"Admin",
self.client.state_store)
crypto = ecies()
tran_prop_req_install = create_tx_prop_req(
prop_type=CC_INSTALL,
cc_path=CC_PATH,
cc_type=CC_TYPE_GOLANG,
cc_name=CC_NAME,
cc_version=CC_VERSION)
tx_context_install = create_tx_context(
self.org1_admin,
crypto,
tran_prop_req_install)
args_dep = ['a', '200', 'b', '300']
tran_prop_req_dep = create_tx_prop_req(
prop_type=CC_INSTANTIATE,
cc_type=CC_TYPE_GOLANG,
cc_name=CC_NAME,
cc_version=CC_VERSION,
args=args_dep,
fcn='init')
tx_context_dep = create_tx_context(self.org1_admin,
crypto,
tran_prop_req_dep)
args = ['a', 'b', '100']
tran_prop_req = create_tx_prop_req(prop_type=CC_INVOKE,
cc_type=CC_TYPE_GOLANG,
cc_name=CC_NAME,
cc_version=CC_VERSION,
fcn='invoke',
args=args)
tx_context = create_tx_context(self.org1_admin, crypto, tran_prop_req)
request = build_channel_request(self.client,
self.channel_tx,
self.channel_name)
self.client._create_channel(request)
sleep(5)
join_req = build_join_channel_req(org1, self.channel, self.client)
self.channel.join_channel(join_req)
sleep(5)
self.client.send_install_proposal(tx_context_install, [self.org1_peer])
sleep(5)
res = self.channel.send_instantiate_proposal(tx_context_dep,
[self.org1_peer])
sleep(5)
tran_req = build_tx_req(res)
send_transaction(self.channel.orderers, tran_req, tx_context)
sleep(5)
tx_context_tx = create_tx_context(self.org1_admin,
crypto,
TXProposalRequest())
res = self.channel.send_tx_proposal(tx_context, [self.org1_peer])
tran_req = build_tx_req(res)
sleep(5)
send_transaction(self.channel.orderers, tran_req, tx_context_tx)
def test_query_block_success(self):
self.invoke_chaincode()
tx_context = create_tx_context(self.org1_admin,
ecies(),
TXProposalRequest())
response = self.channel.query_block(tx_context,
[self.org1_peer],
"1")
q = Queue(1)
response.subscribe(on_next=lambda x: q.put(x),
on_error=lambda x: q.put(x))
res = q.get(timeout=10)
logger.debug(res[0][0][0])
self.assertEqual(res[0][0][0].response.status, 200)
| [
"dixingxu@gmail.com"
] | dixingxu@gmail.com |
e882965dc976548eb945b75960f6b5fca4d2bc1f | 7769cb512623c8d3ba96c68556b2cea5547df5fd | /configs/carafe/faster_rcnn_r50_fpn_carafe_1x.py | 94c8a0fc1a56bc1a6601421de7e9a46df277b5d6 | [
"MIT"
] | permissive | JialeCao001/D2Det | 0e49f4c76e539d574e46b02f278242ca912c31ea | a76781ab624a1304f9c15679852a73b4b6770950 | refs/heads/master | 2022-12-05T01:00:08.498629 | 2020-09-04T11:33:26 | 2020-09-04T11:33:26 | 270,723,372 | 312 | 88 | MIT | 2020-07-08T23:53:23 | 2020-06-08T15:37:35 | Python | UTF-8 | Python | false | false | 5,751 | py | # model settings
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
activation=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64)),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
evaluation = dict(interval=1)
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_r50_fpn_carafe_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"connor@tju.edu.cn"
] | connor@tju.edu.cn |
051cdb1c37fae845be8313b348917477fe0c38b2 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc026/A/4781959.py | 05338ad4307124120e6d44e3642dc59bfc7ff9e1 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | def main():
n, a, b = map(int, input().split())
nb = min(n, 5)
na = n - nb
r = b * nb + a * na
print(r)
main() | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
a4b2b2d69ee2f1995a73f520d25aa4cd34320ae2 | 3db2fcd1a34ae7b22225029587369f49424457dd | /classifier_alignment/AnnotationLoader.py | ce50dc114c4bb171a9b8ded4eeaff6235048fe87 | [] | no_license | pombredanne/realigner | 7f0fdfdf42f757fead45cdeb5ea2901c4965e944 | b0c32cace20dd720c7609f009d86846d9ecb750f | refs/heads/master | 2021-01-18T03:57:30.977009 | 2014-05-06T09:35:46 | 2014-05-06T09:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,277 | py | import re
__author__ = 'michal'
from hmm.HMMLoader import HMMLoader
import track
from tools.intervalmap import intervalmap
from classifier_alignment.AnnotationConfig import register as register_annotations
import constants
class AnnotationLoader:
def __init__(self, sequence_regexp, loader=None):
if loader is None:
self.loader = HMMLoader()
register_annotations(self.loader)
self.x_regexp = sequence_regexp[0]
self.y_regexp = sequence_regexp[1]
@staticmethod
def get_annotation_at(annotations, i):
"""
Returns annotations at position i
@param annotations:
@param i:
"""
base_annotation = dict()
if annotations is not None:
for key in annotations:
base_annotation[key] = annotations[key][i]
return base_annotation
def _intervals_to_interval_map(self, intervals, offset):
"""
Converts intervals from track to intervalmap, for searching
currently supports binary annotations only
"""
m = intervalmap()
m[:] = 0
for i in intervals:
m[i[1]+offset:i[2]+offset] = 1
return m
def _get_annotation_from_bed(self, fname, offset):
"""
Reads intervals from BED file
"""
try:
with track.load(fname) as ann:
ann = ann.read(fields=['start', 'end'])
intervals = self._intervals_to_interval_map(ann, offset)
except Exception:
intervals = self._intervals_to_interval_map([], 0)
return intervals
def _get_sequence_annotations(
self,
annotations,
sequence_annotations_config
):
"""
Returns annotations for one sequence
"""
res = dict()
for annotation in annotations:
res[annotation] = self._get_annotation_from_bed(
*sequence_annotations_config[annotation]
)
return res
def _get_seq_name(self, names, regexp):
r = re.compile(regexp)
matches = [name for name in names if r.match(name)]
if len(matches) != 1:
raise RuntimeError(
'Cannot get name for regexp', regexp, '. Found', len(matches), 'matches.'
)
return matches[0]
def get_annotations_from_model(self, model):
if not constants.annotations_enabled:
return None, None, None
if model is None:
raise RuntimeError('No annotation model!')
names = model.sequences.keys()
x_name = self._get_seq_name(names, self.x_regexp)
y_name = self._get_seq_name(names, self.y_regexp)
annotations = model.annotations
# print 'Using annotations for x:', x_name
annotations_x = self._get_sequence_annotations(
annotations, model.sequences[x_name]
)
# print 'Using annotations for y:', y_name
annotations_y = self._get_sequence_annotations(
annotations, model.sequences[y_name]
)
return annotations, annotations_x, annotations_y
def get_annotations(self, fname):
model = self.loader.load(fname)
return self.get_annotations_from_model(model)
| [
"mhozza@gmail.com"
] | mhozza@gmail.com |
dad547f66f869ac30955e1734c0fdf2097890e2c | bd2a3d466869e0f8cb72075db7daec6c09bbbda1 | /sdk/network/azure-mgmt-network/azure/mgmt/network/_operations_mixin.py | 264c78dbcd0d5ffb95a36599de146d425e3d3237 | [
"MIT"
] | permissive | samvaity/azure-sdk-for-python | 7e8dcb2d3602d81e04c95e28306d3e2e7d33b03d | f2b072688d3dc688fed3905c558cff1fa0849b91 | refs/heads/master | 2021-08-11T21:14:29.433269 | 2019-07-19T17:40:10 | 2019-07-19T17:40:10 | 179,733,339 | 0 | 1 | MIT | 2019-04-05T18:17:43 | 2019-04-05T18:17:42 | null | UTF-8 | Python | false | false | 7,825 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest import Serializer, Deserializer
class NetworkManagementClientOperationsMixin(object):
def check_dns_name_availability(self, location, domain_name_label, custom_headers=None, raw=False, **operation_config):
"""Checks whether a domain name in the cloudapp.azure.com zone is
available for use.
:param location: The location of the domain name.
:type location: str
:param domain_name_label: The domain name to be verified. It must
conform to the following regular expression:
^[a-z][a-z0-9-]{1,61}[a-z0-9]$.
:type domain_name_label: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DnsNameAvailabilityResult or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.network.v2019_04_01.models.DnsNameAvailabilityResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = self._get_api_version('check_dns_name_availability')
if api_version == '2015-06-15':
from .v2015_06_15.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2016-09-01':
from .v2016_09_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2016-12-01':
from .v2016_12_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2017-03-01':
from .v2017_03_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2017-06-01':
from .v2017_06_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2017-08-01':
from .v2017_08_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2017-09-01':
from .v2017_09_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2017-10-01':
from .v2017_10_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2017-11-01':
from .v2017_11_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2018-01-01':
from .v2018_01_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2018-04-01':
from .v2018_04_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2018-06-01':
from .v2018_06_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2018-07-01':
from .v2018_07_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2018-08-01':
from .v2018_08_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2018-10-01':
from .v2018_10_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2018-11-01':
from .v2018_11_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2018-12-01':
from .v2018_12_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2019-02-01':
from .v2019_02_01.operations import NetworkManagementClientOperationsMixin as OperationClass
elif api_version == '2019-04-01':
from .v2019_04_01.operations import NetworkManagementClientOperationsMixin as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance.config = self.config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.check_dns_name_availability(location, domain_name_label, custom_headers, raw, **operation_config)
    def supported_security_providers(self, resource_group_name, virtual_wan_name, custom_headers=None, raw=False, **operation_config):
        """Gives the supported security providers for the virtual wan.

        Dispatches to the operations mixin matching the API version
        negotiated for this operation (2018-08-01 .. 2019-04-01); the
        mixin is instantiated on the fly and wired to this client's
        transport, configuration and version-specific (de)serializers.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param virtual_wan_name: The name of the VirtualWAN for which
         supported security providers are needed.
        :type virtual_wan_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VirtualWanSecurityProviders or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2019_04_01.models.VirtualWanSecurityProviders or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorException<azure.mgmt.network.v2019_04_01.models.ErrorException>`
        """
        # Resolve the API version selected by the client profile, then
        # import the matching versioned operations mixin.
        api_version = self._get_api_version('supported_security_providers')
        if api_version == '2018-08-01':
            from .v2018_08_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-10-01':
            from .v2018_10_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-11-01':
            from .v2018_11_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-12-01':
            from .v2018_12_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2019-02-01':
            from .v2019_02_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2019-04-01':
            from .v2019_04_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        else:
            raise NotImplementedError("APIVersion {} is not available".format(api_version))
        # Build a throwaway mixin instance sharing this client's pipeline
        # and per-version model (de)serializers, then delegate the call.
        mixin_instance = OperationClass()
        mixin_instance._client = self._client
        mixin_instance.config = self.config
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance.supported_security_providers(resource_group_name, virtual_wan_name, custom_headers, raw, **operation_config)
| [
"noreply@github.com"
] | samvaity.noreply@github.com |
c8d9acb81ae074a09b5bba7f60d7cb919bfd6a0b | 5c099927aedc6fdbc515f40ff543c65b3bf4ec67 | /algorithms/path-sum-iii/src/Solution.py | ad4282344a76d00167c30841afc494b03849c924 | [] | no_license | bingzhong-project/leetcode | 7a99cb6af1adfbd9bb1996a7f66a65679053c478 | ba82e7d94840b3fec272e4c5f82e3a2cfe4b0505 | refs/heads/master | 2020-04-15T09:27:33.979519 | 2020-03-10T03:43:07 | 2020-03-10T03:43:07 | 164,550,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | # Definition for a binary tree node.
class TreeNode:
    """A binary-tree node: a value plus left/right child links."""
    def __init__(self, x):
        # Children start empty; callers attach them after construction.
        self.val, self.left, self.right = x, None, None
class Solution:
    def pathSum(self, root: 'TreeNode', sum: 'int') -> 'int':
        """Count downward (parent-to-child) paths whose node values total ``sum``.

        Uses prefix sums over the current root-to-node path, so every node
        is visited exactly once: O(n) time and O(h) extra space, replacing
        the original O(n^2) enumerate-all-suffix-sums approach. Handles
        negative node values and a None root (returns 0).
        """
        from collections import defaultdict
        target = sum  # local alias: the parameter shadows the builtin
        # prefix_counts[s] = how many prefixes of the current path sum to s.
        prefix_counts = defaultdict(int)
        prefix_counts[0] = 1  # empty prefix, so root-anchored paths count too

        def dfs(node, running):
            # Return the number of valid paths ending at or below `node`.
            if node is None:
                return 0
            running += node.val
            # Every earlier prefix equal to running - target closes a path
            # that ends exactly at this node.
            count = prefix_counts[running - target]
            prefix_counts[running] += 1
            count += dfs(node.left, running) + dfs(node.right, running)
            prefix_counts[running] -= 1  # backtrack off the current path
            return count

        return dfs(root, 0)
| [
"zhongyongbin@foxmail.com"
] | zhongyongbin@foxmail.com |
b2456060afc71d8ae1bafe6a039a40981cd94970 | b8ddb0028579ba735bfde8de5e615884e05b012f | /jamaica/v1/lists/serializers.py | 8cf34ebb1a3a5f0804777c537e6d465b456aaf4d | [] | no_license | cohoe/jamaica | f4636eacd6a900de769641e3c3f60fe197be1999 | 0bf053e7b6db291b5aeb53fbd6f7f45082c9df9b | refs/heads/master | 2021-08-10T08:10:36.803415 | 2021-07-18T16:49:45 | 2021-07-18T16:49:45 | 235,926,691 | 2 | 0 | null | 2021-03-13T02:23:06 | 2020-01-24T02:24:22 | Python | UTF-8 | Python | false | false | 1,059 | py | from flask_restx import fields
from jamaica.v1.restx import api
from jamaica.v1.serializers import SearchResultBase
# REST model for one entry of a list: which cocktail it references, an
# optional specific spec of that cocktail, and a highlight flag.
ListItemObject = api.model('ListItemObject', {
    'cocktail_slug': fields.String(attribute='cocktail_slug', description='Slug of the cocktail.'),
    'spec_slug': fields.String(attribute='spec_slug', description='Optional slug of the specific spec.', required=False),
    'highlight': fields.Boolean(attribute='highlight', description='Boolean of whether this is highlighted or not.')
})
# REST model for a whole list: its id, display name and nested items.
ListObject = api.model('ListObject', {
    'id': fields.String(attribute='id', description='ID of this list.'),
    'display_name': fields.String(attribute='display_name', description='Display name of this list.'),
    'items': fields.List(fields.Nested(ListItemObject), attribute='items'),
})
# Search result for a list: extends the shared SearchResultBase, reading
# slug/display_name off the search engine's 'hit' payload.
ListSearchItem = api.inherit('ListSearchItem', SearchResultBase, {
    'slug': fields.String(attribute='hit.slug', description='This items slug.'),
    'display_name': fields.String(attribute='hit.display_name', description='This items display name.'),
})
| [
"grant@grantcohoe.com"
] | grant@grantcohoe.com |
a4d310d2b5b8002735888fb0537e58489cea744e | 99094cc79bdbb69bb24516e473f17b385847cb3a | /33.Search in Rotated Sorted Array/Solution.py | ca44fc1c7fe4e25beda59fa0010124fad406e966 | [] | no_license | simonxu14/LeetCode_Simon | 7d389bbfafd3906876a3f796195bb14db3a1aeb3 | 13f4595374f30b482c4da76e466037516ca3a420 | refs/heads/master | 2020-04-06T03:33:25.846686 | 2016-09-10T00:23:11 | 2016-09-10T00:23:11 | 40,810,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | __author__ = 'Simon'
class Solution(object):
    def search(self, nums, target):
        """Binary search in a rotated ascending array (no duplicates assumed).

        :type nums: List[int]
        :type target: int
        :rtype: int -- index of target, or -1 if absent

        At each step one half of [l, r] is guaranteed sorted; we check
        whether target lies in that sorted half and discard the other.
        O(log n) time, O(1) space.
        """
        l = 0
        r = len(nums) - 1
        while l <= r:
            # Bug fix: `(r + l)/2` is float division on Python 3 and made
            # nums[mid] raise TypeError; `//` keeps mid an int everywhere.
            mid = (l + r) // 2
            if nums[mid] == target:
                return mid
            if nums[l] <= nums[mid]:  # left half [l, mid] is sorted
                if nums[l] <= target < nums[mid]:
                    r = mid - 1
                else:
                    l = mid + 1
            else:  # right half [mid, r] is sorted
                if nums[mid] < target <= nums[r]:
                    l = mid + 1
                else:
                    r = mid - 1
        return -1
"simonxu14@gmail.com"
] | simonxu14@gmail.com |
77c6ff64d70bf43e677e3a109dec9871b6a399d4 | 0bde5f7f09aa537ed1f4828d4e5ebee66475918f | /h2o-bindings/bin/custom/R/gen_svd.py | 29b777c6fd2ccee93d258de42738e4edb898a7cc | [
"Apache-2.0"
] | permissive | Winfredemalx54/h2o-3 | d69f1c07e1f5d2540cb0ce5e6073415fa0780d32 | dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7 | refs/heads/master | 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 | Apache-2.0 | 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null | UTF-8 | Python | false | false | 1,431 | py | rest_api_version = 99
extensions = dict(
required_params=['training_frame', 'x', 'destination_key'],
validate_required_params="",
set_required_params="""
parms$training_frame <- training_frame
if(!missing(x))
parms$ignored_columns <- .verify_datacols(training_frame, x)$cols_ignore
if(!missing(destination_key)) {
warning("'destination_key' is deprecated; please use 'model_id' instead.")
if(missing(model_id)) {
parms$model_id <- destination_key
}
}
""",
)
doc = dict(
preamble="""
Singular value decomposition of an H2O data frame using the power method
""",
params=dict(
x="""
A vector containing the \code{character} names of the predictors in the model.
""",
destination_key="""
(Optional) The unique key assigned to the resulting model.
Automatically generated if none is provided.
""",
),
returns="""
an object of class \linkS4class{H2ODimReductionModel}.
""",
references="""
N. Halko, P.G. Martinsson, J.A. Tropp. {Finding structure with randomness: Probabilistic algorithms for constructing approximate matrix decompositions}[http://arxiv.org/abs/0909.4061]. SIAM Rev., Survey and Review section, Vol. 53, num. 2, pp. 217-288, June 2011.
""",
examples="""
library(h2o)
h2o.init()
australia_path <- system.file("extdata", "australia.csv", package = "h2o")
australia <- h2o.uploadFile(path = australia_path)
h2o.svd(training_frame = australia, nv = 8)
"""
)
| [
"noreply@github.com"
] | Winfredemalx54.noreply@github.com |
ab940f2287f99c5c3edf5a43baede27081603152 | e13f6678fb1be916f4746d8663edfbd841531f7e | /ebc/pauta/interfaces/servico.py | e23c1b36241754dee8b39345394ee8fba1e7813b | [
"Unlicense"
] | permissive | lflrocha/ebc.pauta | 67cf41ad0c751c70a53f609204f913c441de0ab3 | 1a77e9f47e22b60af88cf23f492a8b47ddfd27b6 | refs/heads/master | 2021-01-10T08:38:43.924935 | 2015-05-30T19:24:00 | 2015-05-30T19:24:00 | 36,572,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | from zope import schema
from zope.interface import Interface
from zope.app.container.constraints import contains
from zope.app.container.constraints import containers
from ebc.pauta import pautaMessageFactory as _
class IServico(Interface):
    """Marker Zope interface for the 'Servico' (service) content type.

    Currently declares no schema fields; add zope.schema field
    definitions below as the content type grows.
    """
    # -*- schema definition goes here -*-
| [
"lflrocha@gmail.com"
] | lflrocha@gmail.com |
802ec2a72d6779bc336c31b7a68a721565858c5a | 7a39aed5ceff9070864afea30d3369ec70da093d | /tests/conftest.py | e1e05dcb51c92af9c1b689352a91423da84af5bb | [
"BSD-3-Clause"
] | permissive | Omulosi/reader | a0f3fc2c787481c14254edd7bfcd81f715f51a5d | 12759bb9441846eb5fce618137a6e70e2ec3c286 | refs/heads/master | 2023-07-15T12:02:10.365563 | 2021-08-30T11:37:35 | 2021-08-30T11:37:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,183 | py | import sqlite3
import sys
from contextlib import closing
from functools import wraps
import py.path
import pytest
from utils import reload_module
from reader import make_reader as original_make_reader
from reader._storage import Storage
def pytest_addoption(parser):
    """Expose a ``--runslow`` flag so slow tests become opt-in."""
    parser.addoption("--runslow", action="store_true", default=False,
                     help="run slow tests")
def pytest_collection_modifyitems(config, items):  # pragma: no cover
    # Post-collection hook: apply the slow-test skip policy and the
    # PyPy/sqlite3 flakiness workaround to the collected items.
    apply_runslow(config, items)
    apply_flaky_pypy_sqlite3(items)
def apply_runslow(config, items):  # pragma: no cover
    """Attach a skip marker to tests marked 'slow' unless --runslow was given."""
    if not config.getoption("--runslow"):
        marker = pytest.mark.skip(reason="need --runslow option to run")
        for item in items:
            if "slow" in item.keywords:
                item.add_marker(marker)
def apply_flaky_pypy_sqlite3(items):  # pragma: no cover
    """On PyPy, mark every test flaky so intermittent sqlite3 InterfaceErrors
    get retried (up to 10 runs).

    See https://github.com/lemon24/reader/issues/199#issuecomment-716475686
    """
    if sys.implementation.name != 'pypy':
        return
    flaky_marker = pytest.mark.flaky(
        rerun_filter=lambda err, *args: issubclass(err[0], sqlite3.InterfaceError),
        max_runs=10,
    )
    for item in items:
        item.add_marker(flaky_marker)
@pytest.fixture
def make_reader(request):
    # Factory fixture: returns a wrapper around reader.make_reader() that
    # registers every Reader it creates to be closed at test teardown.
    # NOTE: the inner function deliberately reuses the fixture's name so
    # test signatures read naturally (reader = make_reader(...)).
    @wraps(original_make_reader)
    def make_reader(*args, **kwargs):
        reader = original_make_reader(*args, **kwargs)
        request.addfinalizer(reader.close)  # close even if the test fails
        return reader
    return make_reader
@pytest.fixture
def reader():
    """Yield an in-memory Reader (feed_root='') that is closed afterwards."""
    instance = original_make_reader(':memory:', feed_root='')
    try:
        yield instance
    finally:
        instance.close()
@pytest.fixture
def storage():
    """Yield an in-memory Storage that is closed afterwards."""
    instance = Storage(':memory:')
    try:
        yield instance
    finally:
        instance.close()
# The different ways a test can trigger a feed update; each takes
# (reader, feed_url) so they are interchangeable in call_update_method.
def call_update_feeds(reader, _):
    # Update every feed; ignores the specific-feed argument.
    reader.update_feeds()
def call_update_feeds_workers(reader, _):
    # Update every feed using two worker threads.
    reader.update_feeds(workers=2)
def call_update_feeds_iter(reader, _):
    # Drive the incremental update_feeds_iter() generator to completion.
    for _ in reader.update_feeds_iter():
        pass
def call_update_feeds_iter_workers(reader, _):
    # Incremental variant, with two worker threads.
    for _ in reader.update_feeds_iter(workers=2):
        pass
def call_update_feed(reader, url):
    # Update only the feed identified by url.
    reader.update_feed(url)
@pytest.fixture(
    params=[
        call_update_feeds,
        pytest.param(call_update_feeds_workers, marks=pytest.mark.slow),
        call_update_feeds_iter,
        pytest.param(call_update_feeds_iter_workers, marks=pytest.mark.slow),
        call_update_feed,
    ]
)
def call_update_method(request):
    # Parametrizes tests over every update entry point; the multi-worker
    # variants are marked slow, so they only run with --runslow.
    return request.param
# Reader methods accept a feed either as its URL string or as a Feed
# object; tests parametrize over both calling conventions.
def feed_arg_as_str(feed):
    return feed.url
def feed_arg_as_feed(feed):
    return feed
@pytest.fixture(params=[feed_arg_as_str, feed_arg_as_feed])
def feed_arg(request):
    return request.param
# Entries are accepted either as a (feed_url, entry_id) tuple or as an
# Entry object; tests parametrize over both calling conventions.
def entry_arg_as_tuple(entry):
    return entry.feed.url, entry.id
def entry_arg_as_entry(entry):
    return entry
@pytest.fixture(params=[entry_arg_as_tuple, entry_arg_as_entry])
def entry_arg(request):
    return request.param
@pytest.fixture
def db_path(tmpdir):
    # Path (as str) for a fresh on-disk SQLite database in pytest's tmp dir.
    return str(tmpdir.join('db.sqlite'))
@pytest.fixture
def data_dir():
    # The tests/data directory, located next to this conftest file.
    return py.path.local(__file__).dirpath().join('data')
| [
"damian.adrian24@gmail.com"
] | damian.adrian24@gmail.com |
e68f8e37594831072a4f8e00d262136ca8794866 | 5d902e2565b08dc6b8bb2f06231a4319d9715513 | /polyaxon/runner/spawners/notebook_spawner.py | 72319bf6c8f57f367669104d98e7564e0899ea9e | [
"MIT"
] | permissive | rohansaphal97/polyaxon | bd4febfc94b7d1aa95ef8152472c3dcba725f6b2 | ee42a05e40c4d400a281b3b2c5d26f5b46bd785c | refs/heads/master | 2020-03-13T19:51:44.783780 | 2018-04-26T17:52:35 | 2018-04-26T18:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,763 | py | import json
import logging
import random
from django.conf import settings
from libs.utils import get_hmac
from projects.paths import get_project_repos_path
from runner.spawners.base import get_pod_volumes
from runner.spawners.project_spawner import ProjectSpawner
from runner.spawners.templates import constants, deployments, ingresses, pods, services
logger = logging.getLogger('polyaxon.spawners.notebook')
class NotebookSpawner(ProjectSpawner):
    """Starts/stops a per-project Jupyter notebook on Kubernetes.

    start_notebook() creates a Deployment running `jupyter notebook`, a
    Service in front of it, and (when ingress mode is enabled) an Ingress
    path; stop_notebook() deletes the same resources.
    """
    NOTEBOOK_JOB_NAME = 'notebook'
    PORT = 8888  # port the notebook server listens on inside the pod
    def get_notebook_url(self):
        # External URL of the notebook's Service.
        return self._get_service_url(self.NOTEBOOK_JOB_NAME)
    def get_notebook_token(self):
        # Deterministic auth token derived from the app label + project uuid,
        # so the same project always gets the same token.
        return get_hmac(settings.APP_LABELS_NOTEBOOK, self.project_uuid)
    @staticmethod
    def get_notebook_code_volume():
        # Volume + mount exposing the shared repos claim inside the pod, so
        # the notebook can see the project's code checkout.
        volume = pods.get_volume(volume=constants.REPOS_VOLUME,
                                 claim_name=settings.REPOS_CLAIM_NAME,
                                 volume_mount=settings.REPOS_ROOT)
        volume_mount = pods.get_volume_mount(volume=constants.REPOS_VOLUME,
                                             volume_mount=settings.REPOS_ROOT)
        return volume, volume_mount
    def request_notebook_port(self):
        # Without ingress the fixed container port is used directly; with
        # ingress, pick a random port in NOTEBOOK_PORT_RANGE not already
        # taken by another notebook Service.
        if not self._use_ingress():
            return self.PORT
        labels = 'app={},role={}'.format(settings.APP_LABELS_NOTEBOOK,
                                         settings.ROLE_LABELS_DASHBOARD)
        ports = [service.spec.ports[0].port for service in self.list_services(labels)]
        port = random.randint(*settings.NOTEBOOK_PORT_RANGE)
        while port in ports:
            port = random.randint(*settings.NOTEBOOK_PORT_RANGE)
        return port
    def start_notebook(self, image, resources=None):
        # Service port (possibly random) vs. the fixed container port.
        ports = [self.request_notebook_port()]
        target_ports = [self.PORT]
        # Standard pod volumes plus the shared code checkout.
        volumes, volume_mounts = get_pod_volumes()
        code_volume, code_volume_mount = self.get_notebook_code_volume()
        volumes.append(code_volume)
        volume_mounts.append(code_volume_mount)
        deployment_name = constants.DEPLOYMENT_NAME.format(
            project_uuid=self.project_uuid, name=self.NOTEBOOK_JOB_NAME)
        notebook_token = self.get_notebook_token()
        notebook_url = self._get_proxy_url(
            namespace=self.namespace,
            job_name=self.NOTEBOOK_JOB_NAME,
            deployment_name=deployment_name,
            port=ports[0])
        # Notebook working dir: <repos path>/<repo name> (repo dir is named
        # after the last path component).
        notebook_dir = get_project_repos_path(self.project_name)
        notebook_dir = '{}/{}'.format(notebook_dir, notebook_dir.split('/')[-1])
        deployment = deployments.get_deployment(
            namespace=self.namespace,
            app=settings.APP_LABELS_NOTEBOOK,
            name=self.NOTEBOOK_JOB_NAME,
            project_name=self.project_name,
            project_uuid=self.project_uuid,
            volume_mounts=volume_mounts,
            volumes=volumes,
            image=image,
            command=["/bin/sh", "-c"],
            args=[
                "jupyter notebook "
                "--no-browser "
                "--port={port} "
                "--ip=0.0.0.0 "
                "--allow-root "
                "--NotebookApp.token={token} "
                "--NotebookApp.trust_xheaders=True "
                "--NotebookApp.base_url={base_url} "
                "--NotebookApp.notebook_dir={notebook_dir} ".format(
                    port=self.PORT,
                    token=notebook_token,
                    base_url=notebook_url,
                    notebook_dir=notebook_dir)],
            ports=target_ports,
            container_name=settings.CONTAINER_NAME_PLUGIN_JOB,
            resources=resources,
            role=settings.ROLE_LABELS_DASHBOARD,
            type=settings.TYPE_LABELS_EXPERIMENT)
        deployment_labels = deployments.get_labels(app=settings.APP_LABELS_NOTEBOOK,
                                                   project_name=self.project_name,
                                                   project_uuid=self.project_uuid,
                                                   role=settings.ROLE_LABELS_DASHBOARD,
                                                   type=settings.TYPE_LABELS_EXPERIMENT)
        self.create_or_update_deployment(name=deployment_name, data=deployment)
        # Service exposing the deployment on the chosen port.
        service = services.get_service(
            namespace=self.namespace,
            name=deployment_name,
            labels=deployment_labels,
            ports=ports,
            target_ports=target_ports,
            service_type=self._get_service_type())
        self.create_or_update_service(name=deployment_name, data=service)
        if self._use_ingress():
            # Route /notebook/<project path> to the Service ('.' in the
            # project name becomes a '/' path separator).
            annotations = json.loads(settings.K8S_INGRESS_ANNOTATIONS)
            paths = [{
                'path': '/notebook/{}'.format(self.project_name.replace('.', '/')),
                'backend': {
                    'serviceName': deployment_name,
                    'servicePort': ports[0]
                }
            }]
            ingress = ingresses.get_ingress(namespace=self.namespace,
                                            name=deployment_name,
                                            labels=deployment_labels,
                                            annotations=annotations,
                                            paths=paths)
            self.create_or_update_ingress(name=deployment_name, data=ingress)
    def stop_notebook(self):
        # Tear down everything start_notebook() created (ingress only when
        # ingress mode is enabled).
        deployment_name = constants.DEPLOYMENT_NAME.format(project_uuid=self.project_uuid,
                                                           name=self.NOTEBOOK_JOB_NAME)
        self.delete_deployment(name=deployment_name)
        self.delete_service(name=deployment_name)
        if self._use_ingress():
            self.delete_ingress(name=deployment_name)
| [
"mouradmourafiq@gmail.com"
] | mouradmourafiq@gmail.com |
ef03b435fce9dbb91ba88d54ee8e945bdd417016 | 9d03d3e8b739a0a1aae7eca09fce6a6e3cd7fd9d | /model/position/__init__.py | 2d890d9eb6b9146c76025d13d2280bad7b01502c | [] | no_license | CallingWisdom/trade | c30954c8be17d7b140ad376011486caede69fd68 | a231ade6dbe99288a4ada2eec0e187b1e28594da | refs/heads/master | 2021-05-06T18:12:46.058972 | 2017-06-09T09:21:09 | 2017-06-09T09:21:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model.position.base_position import BasePosition
from model.position.future_position import FuturePosition
from model.position.stock_position import StockPosition
class Positions(dict):
    """Mapping of key -> position object.

    Plain item access (``positions[key]``) on a missing key returns a
    lazily created, separately cached placeholder WITHOUT inserting it
    into the dict, so membership tests only reflect positions that were
    stored explicitly (e.g. via get_or_create).
    """
    def __init__(self, position_cls):
        super(Positions, self).__init__()
        self._position_cls = position_cls
        self._cached_positions = {}
    def __missing__(self, key):
        # EAFP: hand back the cached placeholder, creating it on first use.
        try:
            return self._cached_positions[key]
        except KeyError:
            placeholder = self._position_cls(key)
            self._cached_positions[key] = placeholder
            return placeholder
    def get_or_create(self, key):
        # Unlike __missing__, this really stores the position in the dict.
        if key in self:
            return self[key]
        position = self._position_cls(key)
        self[key] = position
        return position
| [
"511735184@qq.com"
] | 511735184@qq.com |
2cc9599e40b6bdaa83c3872fc6617694066af3ab | f1c3a21c820fc1b0d182c859486cc6461f299bb9 | /TCN-TF/tcn.py | 4331af07f57e7fee40cbfa8036ae0ad6083a1730 | [] | no_license | JZDBB/AVEC | 85ee92a90ca9517780e4cc59d250d0b82c12cdeb | 79bd55b80be4e2ebd08c376f91900dbbb60e6dca | refs/heads/master | 2020-04-17T03:14:04.481303 | 2019-01-15T11:34:48 | 2019-01-27T11:35:03 | 166,172,956 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,691 | py | #coding: utf-8
'''
Author: Weiping Song
Time: April 24, 2018
'''
import tensorflow as tf
from wnconv1d import wnconv1d
class TemporalConvNet(object):
    """Temporal Convolutional Network (TF1 graph mode).

    Stacks residual blocks of dilated 1-D convolutions; the dilation
    doubles at each level (2**i) and inputs are left-padded only, which
    keeps the convolutions causal.
    """
    def __init__(self, num_channels, stride=1, kernel_size=2, dropout=0.2):
        # num_channels: list of output channel counts, one per level.
        self.kernel_size=kernel_size
        self.stride = stride
        self.num_levels = len(num_channels)
        self.num_channels = num_channels
        self.dropout = dropout
        # Scalar bool placeholder: feed True during training to enable dropout.
        self.is_training = tf.placeholder(shape=[], dtype=tf.bool)
    def __call__(self, inputs):
        # inputs is a rank-3 tensor (batch, time, channels) — channels taken
        # from the last static dimension below.
        inputs_shape = inputs.get_shape().as_list()
        outputs = [inputs]
        for i in range(self.num_levels):
            dilation_size = 2 ** i  # exponentially growing receptive field
            in_channels = inputs_shape[-1] if i == 0 else self.num_channels[i-1]
            out_channels = self.num_channels[i]
            output = self._TemporalBlock(outputs[-1], in_channels, out_channels, self.kernel_size,
                                self.stride, dilation=dilation_size, padding=(self.kernel_size-1)*dilation_size,
                                dropout=self.dropout, level=i)
            outputs.append(output)
            tf.summary.histogram('%d'%i, output)  # per-level activation summary
        return outputs[-1]
    def _TemporalBlock(self, value, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2, level=0):
        # Two weight-normalized causal convs + dropout, with a residual
        # connection (1x1 conv when channel counts differ).
        # NOTE(review): conv1/conv2/output1/output2 are stored on self and
        # overwritten at every level — confirm that is intentional.
        padded_value1 = tf.pad(value, [[0,0], [padding,0], [0,0]])  # left-pad time axis only (causal)
        self.conv1 = wnconv1d(inputs=padded_value1,
                        filters=n_outputs,
                        kernel_size=kernel_size,
                        strides=stride,
                        padding='valid',
                        dilation_rate=dilation,
                        activation=None,
                        weight_norm=True, #default is false.
                        kernel_initializer=tf.random_normal_initializer(0, 0.01),
                        bias_initializer=tf.zeros_initializer(),
                        name='layer'+str(level)+'_conv1')
        self.output1 = tf.contrib.layers.dropout(tf.nn.elu(self.conv1), keep_prob=1-dropout, is_training=self.is_training)
        padded_value2 = tf.pad(self.output1, [[0,0], [padding,0], [0,0]])
        self.conv2 = wnconv1d(inputs=padded_value2,
                        filters=n_outputs,
                        kernel_size=kernel_size,
                        strides=stride,
                        padding='valid',
                        dilation_rate=dilation,
                        activation=None,
                        weight_norm=True, #default is False.
                        kernel_initializer=tf.random_normal_initializer(0, 0.01),
                        bias_initializer=tf.zeros_initializer(),
                        name='layer'+str(level)+'_conv2')
        self.output2 = tf.contrib.layers.dropout(tf.nn.elu(self.conv2), keep_prob=1-dropout, is_training=self.is_training)
        if n_inputs != n_outputs:
            # 1x1 conv matches the residual's channel count to the block output.
            res_x = tf.layers.conv1d(inputs=value,
                            filters=n_outputs,
                            kernel_size=1,
                            activation=None,
                            kernel_initializer=tf.random_normal_initializer(0, 0.01),
                            bias_initializer=tf.zeros_initializer(),
                            name='layer'+str(level)+'_conv')
        else:
            res_x = value
        # Drops the first 2*padding time steps of the combined output —
        # presumably the frames influenced by the two pads; TODO confirm.
        return tf.nn.elu(res_x + self.output2)[:,2*padding:,:]
| [
"oxuyining@gmail.com"
] | oxuyining@gmail.com |
182058774046558c47942649a24c9481da11c275 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_4/vhrjoc001/question1.py | 7ecb00ea83049b6f3e69ecfb564bccfff7664dc8 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # test program for box drawer
import boxes

# Interactive driver: the first letter of the input selects the test; for
# 'b' and 'c' the rest of the line is "<width> <height>".
choice = input("Choose test:\n")
action = choice[:1]
if action == 'a':
    boxes.print_square()
elif action == 'b':
    width, height = map(int, choice[2:].split(" "))
    print("calling function")
    boxes.print_rectangle(width, height)  # bug fix: was `lll`, an undefined name
    print("called function")
elif action == 'c':
    width, height = map(int, choice[2:].split(" "))
    print("calling function")
    figure = boxes.get_rectangle(width, height)
    print("called function")
    print(figure)
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
7e104bc331c9f6d2a933011e4a39b82e9eadc828 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_14465-1033/sdB_EC_14465-1033_coadd.py | fd7c867f87c741b45aa844001303b9f6bd4ad19b | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from gPhoton.gMap import gMap
def main():
    # One-shot gPhoton run for sdB_EC_14465-1033: builds an NUV count movie
    # (30 s time bins via stepsz) and a coadded count image over the same
    # 0.0333-degree (~2 arcmin) field; skypos is (RA, Dec) in degrees.
    # NOTE(review): the movie path uses .../sdBs/... while the coadd uses
    # .../sdB/... — confirm the differing directory is intentional.
    gMap(band="NUV", skypos=[222.304042,-10.760042], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_EC_14465-1033/sdB_EC_14465-1033_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_EC_14465-1033/sdB_EC_14465-1033_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
53c5f847e2583a782f9bd1c46700bb9d009aaef4 | 0910e259a9bd252300f19b2ff22049d640f19b1a | /ml/m16_pipeline_RS3_wine.py | 7b06a57c93e8fc7ce2d3be0c4654e2fd695ad072 | [] | no_license | kimtaeuk-AI/Study | c7259a0ed1770f249b78f096ad853be7424a1c8e | bad5a0ea72a0117035b5e45652819a3f7206c66f | refs/heads/master | 2023-05-05T12:34:52.471831 | 2021-05-22T16:16:12 | 2021-05-22T16:16:12 | 368,745,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | #전처리 하나와 모델을 합침
import numpy as np
import tensorflow as tf
import pandas as pd
from sklearn.datasets import load_wine
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline, make_pipeline
import timeit
start_time = timeit.default_timer()
import warnings
warnings.filterwarnings('ignore')
# Load the wine dataset and hold out 20% for the final score.
dataset = load_wine()
x = dataset.data
y = dataset.target
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=44)
# The Pipeline below scales inside each CV fold, so no manual
# MinMaxScaler fit/transform is needed here.
# Grid-search keys are "<step name>__<param>"; the SVC step is named
# "mal", hence the "mal__" prefixes. (Removed a duplicate grid that used
# "svc__" keys: it was dead code — immediately overwritten and matching
# no pipeline step.)
parameters = [
    {"mal__C": [1, 10, 100, 1000], "mal__kernel": ["linear"]},                                   # 4 combos
    {"mal__C": [1, 10, 100], "mal__kernel": ["rbf"], "mal__gamma": [0.001, 0.0001]},             # 6 combos
    {"mal__C": [1, 10, 100, 1000], "mal__kernel": ["sigmoid"], "mal__gamma": [0.001, 0.0001]},   # 8 combos
]
# 2. Model: scaler + SVC fused into one estimator, tuned by 5-fold grid
# search (scaling is re-fit on each fold's training split, avoiding leakage).
Pipe = Pipeline([('scale', MinMaxScaler()), ('mal', SVC())])
model = GridSearchCV(Pipe, parameters, cv=5)
model.fit(x_train, y_train)
results = model.score(x_test, y_test)
print('results : ', results)
# Previously observed: results 0.9666666666666667 with either
# MinMaxScaler or StandardScaler.
| [
"ki3123.93123@gmail.com"
] | ki3123.93123@gmail.com |
0d389ad98371546fa8ff77dbfb4acf4c1ea82b87 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_pyres.py | 451934aa41c0b1e439c98c40e31627b1188cf4eb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
from xai.brain.wordbase.nouns._pyre import _PYRE
# class header
class _PYRES(_PYRE):
    """Word entry 'PYRES' (plural of the noun 'pyre')."""
    def __init__(self):
        _PYRE.__init__(self)
        self.name = "PYRES"
        self.specie = 'nouns'
        self.basic = "pyre"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
0214d99b83874837d35712c1e0bd2eb3d8662662 | ab9a0e787695b9d04483cac5b710931287ed3e54 | /ruia_cache/cache_patch/__init__.py | 0172eabb8a618b63f96a73bc4c75f3327546d912 | [
"Apache-2.0"
] | permissive | python-ruia/ruia-cache | 42a529b17192b31237bc18ad2126400ec20ce9dd | a18609b29e76ad11c81aa1254e6b2d8a49454abd | refs/heads/main | 2023-02-15T08:38:31.565745 | 2021-01-03T13:44:36 | 2021-01-03T13:44:36 | 325,297,883 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | #!/usr/bin/env python
"""
Created by howie.hu at 2021/1/3.
"""
from .req_cache import req_cache
from .resp_cache import resp_cache
| [
"howie6879@gmail.com"
] | howie6879@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.