blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9f2bb5b89c2bf4b4fc694b91d1551cbe40a13be6
|
db80edb9be895c4ebcb9acac96eff92b7fda2bd3
|
/src/scripts/dataset_experiment_2019_1_3_(LNL)_train_script.py
|
fdf61ca86044647bec3707c7457949e6de9f1db1
|
[] |
no_license
|
YhHoo/AE-signal-model
|
9950182425377364d83a8a86b72ed181b789a599
|
8ba384397a88ea8316deee3173503fccb9e485af
|
refs/heads/master
| 2021-06-26T03:43:21.482586
| 2019-05-16T10:53:25
| 2019-05-16T10:53:25
| 131,477,937
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,510
|
py
|
# this is for bash to know the path of the src
# Iterative task
import sys
sys.path.append('C:/Users/YH/PycharmProjects/AE-signal-model')
import time
import tensorflow as tf
import argparse
from src.experiment_dataset.dataset_experiment_2019_1_3 import AcousticEmissionDataSet
from src.model_bank.dataset_2018_7_13_lcp_recognition_model import *
from src.utils.helpers import *
# ------------------------------------------------------------------------------------------------------------ ARG PARSE
parser = argparse.ArgumentParser(description='Input some parameters.')
parser.add_argument('--model', default=1, type=str, help='Model Name')
parser.add_argument('--kernel_size', default=1, type=int, nargs='+', help='kernel size')
parser.add_argument('--fc_size', default=1, type=int, nargs='+', help='fully connected size')
parser.add_argument('--epoch', default=100, type=int, help='Number of training epoch')
args = parser.parse_args()
MODEL_SAVE_FILENAME = args.model
RESULT_SAVE_FILENAME = 'C:/Users/YH/PycharmProjects/AE-signal-model/result/{}_result.txt'.format(MODEL_SAVE_FILENAME)
EPOCH = args.epoch
KERNEL_SIZE = args.kernel_size
FC_SIZE = args.fc_size
print('Result saving filename: ', RESULT_SAVE_FILENAME)
print('Conv Kernel size: ', KERNEL_SIZE)
print('FC neuron size: ', FC_SIZE)
# ----------------------------------------------------------------------------------------------------------- GPU CONFIG
# instruct GPU to allocate only sufficient memory for this script
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# ------------------------------------------------------------------------------------------------------------ DATA PREP
ae_data = AcousticEmissionDataSet(drive='G')
train_x, train_y, test_x, test_y = ae_data.random_leak_noleak_include_unseen(train_split=0.8)
train_x_reshape = train_x.reshape((train_x.shape[0], train_x.shape[1], 1))
test_x_reshape = test_x.reshape((test_x.shape[0], test_x.shape[1], 1))
train_y_cat = to_categorical(train_y, num_classes=2)
test_y_cat = to_categorical(test_y, num_classes=2)
# ------------------------------------------------------------------------------------------------------- MODEL TRAINING
lcp_model = LNL_binary_model_2(kernel_size=KERNEL_SIZE, fc_size=FC_SIZE)
lcp_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
# saving best weight setting
logger = ModelLogger(model=lcp_model, model_name=MODEL_SAVE_FILENAME)
save_weight_checkpoint = logger.save_best_weight_cheakpoint(monitor='val_loss', period=5)
# start training
total_epoch = EPOCH
time_train_start = time.time()
history = lcp_model.fit(x=train_x_reshape,
y=train_y_cat,
validation_data=(test_x_reshape, test_y_cat),
callbacks=[save_weight_checkpoint],
epochs=total_epoch,
batch_size=200,
shuffle=True,
verbose=2)
time_train = time.time() - time_train_start
logger.save_architecture(save_readable=True)
# ------------------------------------------------------------------------------------------------------- LEARNING CURVE
# name for fig suptitle and filename
lr_name = '{}_LrCurve'.format(MODEL_SAVE_FILENAME)
fig_lr = plt.figure(figsize=(10, 7))
fig_lr.subplots_adjust(left=0.08, bottom=0.07, right=0.96, top=0.89)
fig_lr.suptitle(lr_name)
ax_lr = fig_lr.add_subplot(1, 1, 1)
ax_lr.plot(history.history['loss'], label='train_loss')
ax_lr.plot(history.history['val_loss'], label='val_loss')
ax_lr.plot(history.history['acc'], label='train_acc')
ax_lr.plot(history.history['val_acc'], label='val_acc')
ax_lr.legend()
fig_lr_save_filename = direct_to_dir(where='result') + '{}.png'.format(lr_name)
fig_lr.savefig(fig_lr_save_filename)
# evaluate ------------------------------------------------------------------------------------------ EVALUATE REPORTING
# no of trainable parameter
trainable_count = int(np.sum([K.count_params(p) for p in set(lcp_model.trainable_weights)]))
# find highest val acc and lowest loss
best_val_acc_index = np.argmax(history.history['val_acc'])
best_val_loss_index = np.argmin(history.history['val_loss'])
# loading best model saved
lcp_best_model = load_model(model_name=MODEL_SAVE_FILENAME)
# test with val data
time_predict_start = time.time()
prediction = lcp_best_model.predict(test_x_reshape)
time_predict = time.time() - time_predict_start
prediction_argmax = np.argmax(prediction, axis=1)
actual_argmax = np.argmax(test_y_cat, axis=1)
# plot validation data
evaluate_name = '{}_Evaluate'.format(MODEL_SAVE_FILENAME)
fig_evaluate = plt.figure(figsize=(10, 7))
fig_evaluate.subplots_adjust(left=0.08, bottom=0.07, right=0.96, top=0.89)
fig_evaluate.suptitle(evaluate_name)
ax_evaluate = fig_evaluate.add_subplot(1, 1, 1)
ax_evaluate.plot(actual_argmax, color='r', label='Actual')
ax_evaluate.plot(prediction_argmax, color='b', label='Prediction', linestyle='None', marker='x')
ax_evaluate.legend()
fig_lr_save_filename = direct_to_dir(where='result') + '{}.png'.format(evaluate_name)
fig_evaluate.savefig(fig_lr_save_filename)
print('\n---------- EVALUATION RESULT SCRIPT LNL 1 -----------')
print('**Param in tuning --> [pool:(3, 2, 2), split=0.8, val_included_test]')
print('Model Trainable params: {}'.format(trainable_count))
print('Best Validation Accuracy: {:.4f} at Epoch {}/{}'.format(history.history['val_acc'][best_val_acc_index],
best_val_acc_index,
total_epoch))
print('Lowest Validation Loss: {:.4f} at Epoch {}/{}'.format(history.history['val_loss'][best_val_loss_index],
best_val_loss_index,
total_epoch))
print('Time taken to execute 1 sample: {}s'.format(time_predict / len(test_x_reshape)))
print('Time taken to complete {} epoch: {:.4f}s'.format(total_epoch, time_train))
rpf_result = logger.save_recall_precision_f1(y_pred=prediction_argmax, y_true=actual_argmax, all_class_label=[0, 1])
print('\nDist and Labels')
print('[NoLeak] -> class_0')
print('[Leak] -> class_1')
# saving the printed result again
with open(RESULT_SAVE_FILENAME, 'w') as f:
f.write('\n---------- EVALUATION RESULT SCRIPT LNL 1 -----------')
f.write('\nModel Conv Kernels Size: {}, FC Size: {}'.format(KERNEL_SIZE, FC_SIZE))
f.write('\nModel Trainable params: {}'.format(trainable_count))
f.write('\nBest Validation Accuracy: {:.4f} at Epoch {}/{}'.format(history.history['val_acc'][best_val_acc_index],
best_val_acc_index,
total_epoch))
f.write('\nLowest Validation Loss: {:.4f} at Epoch {}/{}'.format(history.history['val_loss'][best_val_loss_index],
best_val_loss_index,
total_epoch))
f.write('\nTime taken to execute 1 sample: {}s'.format(time_predict / len(test_x_reshape)))
f.write('\nTime taken to complete {} epoch: {:.4f}s'.format(total_epoch, time_train))
for i in rpf_result:
f.write('\n' + i)
f.write('\n\nDist and Labels')
f.write('\n[NoLeak] -> class_0')
f.write('\n[Leak] -> class_1')
|
[
"hooyuheng@gmail.com"
] |
hooyuheng@gmail.com
|
8699e2d9f2857916e726b82f33a9737a08f29c55
|
da9c4a9a92d49d2fb2983a54e0f64c2a1ce8aa19
|
/symphony/cli/pyinventory/graphql/fragment/service_type.py
|
721c139d30f17b42911093ca706dd702b1e3e012
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
rohan-prasad/magma
|
347c370347724488215a0783504788eac41d8ec7
|
2c1f36d2fd04eae90366cc8b314eaab656d7f8ad
|
refs/heads/master
| 2022-10-14T14:08:14.067593
| 2020-06-11T23:52:03
| 2020-06-11T23:54:27
| 271,671,835
| 0
| 0
|
NOASSERTION
| 2020-06-12T00:20:23
| 2020-06-12T00:17:39
| null |
UTF-8
|
Python
| false
| false
| 1,427
|
py
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from gql.gql.client import OperationException
from gql.gql.reporter import FailedOperationException
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin
from ..fragment.property_type import PropertyTypeFragment, QUERY as PropertyTypeFragmentQuery
from ..fragment.service_endpoint_definition import ServiceEndpointDefinitionFragment, QUERY as ServiceEndpointDefinitionFragmentQuery
QUERY: List[str] = PropertyTypeFragmentQuery + ServiceEndpointDefinitionFragmentQuery + ["""
fragment ServiceTypeFragment on ServiceType {
id
name
hasCustomer
propertyTypes {
...PropertyTypeFragment
}
endpointDefinitions {
...ServiceEndpointDefinitionFragment
}
}
"""]
@dataclass
class ServiceTypeFragment(DataClassJsonMixin):
@dataclass
class PropertyType(PropertyTypeFragment):
pass
@dataclass
class ServiceEndpointDefinition(ServiceEndpointDefinitionFragment):
pass
id: str
name: str
hasCustomer: bool
propertyTypes: List[PropertyType]
endpointDefinitions: List[ServiceEndpointDefinition]
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
024f42d62675e06d3eb3019186c76208aeeb1a10
|
13ce655f82b93fb4089b29e62a8e33dd7ff05493
|
/src/wai/json/object/property/proxies/__init__.py
|
02e3fe800f49bb03e939e66e0758c8f5b174e04d
|
[
"MIT"
] |
permissive
|
waikato-datamining/wai-json
|
603b90b13155114bbfb60b40f45100248c03d710
|
cb013fb16e7c1b8d91e040a387a143d29d4ced96
|
refs/heads/master
| 2021-01-07T15:06:22.957223
| 2020-03-17T23:59:14
| 2020-03-17T23:59:14
| 241,736,670
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
"""
Package for proxy objects. Proxies are objects which act like other
objects, but can be converted to and from JSON, and subscribe to a
schema which is enforced during programmatic use as well as conversion.
"""
from ._ArrayProxy import ArrayProxy
from ._MapProxy import MapProxy
|
[
"coreytsterling@gmail.com"
] |
coreytsterling@gmail.com
|
35264d72969241f81b66b9d5a4b9c691c83f4953
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03147/s721564434.py
|
2d4d71c12e237e3c8db09fbb5ea107d30f69bc5b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
n =int(input())
h=list(map(int,input().split()))
ans=0
p=0
for i in range(n):
if h[i]>=p:
ans+=h[i]-p
p=h[i]
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
acc77520c22c42f333191aa95a4b6817dbac255d
|
29d1e5d1190ddd6cdf1e1b97b91f442765905454
|
/Chapter 2/demo_str/demo_split.py
|
056d3e3c61bab0e7d590ab3dc4357f41524963ed
|
[] |
no_license
|
SkewwG/SCIP_Python_Learn
|
abe199e1701022c1491c9e5d6de98d653c267ab9
|
11216e958f5a77c90c0583ca5cfdb1ec9fb2896c
|
refs/heads/master
| 2021-09-13T18:40:42.426924
| 2018-05-03T07:33:52
| 2018-05-03T07:33:52
| 116,967,610
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
'''
split(...)
通过指定分隔符对字符串进行切片,如果参数num有指定值,则仅分隔 num 个子字符串
S.split(sep=None, maxsplit=-1) -> list of strings
Return a list of the words in S, using sep as the
delimiter string. If maxsplit is given, at most maxsplit
splits are done. If sep is not specified or is None, any
whitespace string is a separator and empty strings are
removed from the result.
'''
print(help(str.split))
a = "abcdeabcdeabcdeabcdeabcdeabcde"
print(a.split('c')) # ['ab', 'deab', 'deab', 'deab', 'deab', 'deab', 'de']
print(a.split('c', 1)) # ['ab', 'deabcdeabcdeabcdeabcdeabcde']
print(a.split('c', 3)) # ['ab', 'deab', 'deab', 'deabcdeabcdeabcde']
|
[
"446106525@qq.com"
] |
446106525@qq.com
|
810083db87880c4c4b1795f932349768dc679df6
|
84ecc3f416647b4c6e40faa6d5392421bc13a4ec
|
/exercise3.py
|
372b35f405eb76c6295a87135502cea961c1395d
|
[] |
no_license
|
Deer5000/DebuggingExercise1_4
|
b358670708c1b74125b3badea256ee980aef6672
|
35caeeeb8fce0480aa99ea3f7ee1de05624cf9df
|
refs/heads/master
| 2023-02-28T15:41:41.475716
| 2021-01-24T23:55:45
| 2021-01-24T23:55:45
| 332,583,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,561
|
py
|
"""
Exercise 3
"""
# PART 1: Gather Information
#
# TODO: Gather information about the source of the error and paste your findings here. E.g.:
# - What is the expected vs. the actual output?
# - expected a sorted number but got an error instead
# - What error message (if any) is there?
# - Error message is "IndexError: list index out of range"
# - What line number is causing the error?
# - while key < arr[j]
# - What can you deduce about the cause of the error?
# - Developer does not know how to insert error and to worry about the last element to move
# PART 2: State Assumptions
#
# TODO: State your assumptions here or say them out loud to your partner ... Worked with donny Vallejo
# Make sure to be SPECIFIC about what each of your assumptions is!
# HINT: It may help to draw a picture to clarify what your assumptions are.
# - Developer didnt foresee that if element to move (j's index) is less than the length of arr, then that number will not exist anymore
# What is insertion sort: https://www.youtube.com/watch?v=JU767SDMDvA
def insertion_sort(arr):
"""Performs an Insertion Sort on the array arr."""
for i in range(1, len(arr)):
key = arr[i]
j = i-1
while key < arr[j] and j >= 0: #Move elements of arr[0..i-1], that are greater than key, to one position ahead of their current position
arr[j+1] = arr[j]
j -= 1
arr[j+1] = key
return arr
if __name__ == '__main__':
print('### Problem 3 ###')
answer = insertion_sort([5, 2, 3, 1, 6])
print(answer)
|
[
"khidrbrinkley32@gmail.com"
] |
khidrbrinkley32@gmail.com
|
bfbbe25dbfa3f0b2ae468d54e782e2f14c642e75
|
ecdf9256853e11d6105e2b9ad92ba912602d97d7
|
/hackerrank/implementation/utopian_tree.py
|
3925093946f80b538058215341cddd8a3778c7ea
|
[] |
no_license
|
rgsriram/Algorithms
|
364fda568356834e32ec247438d21202bebc838d
|
d4f9acb1a60bd098a601d8173dfdad447a02fd74
|
refs/heads/master
| 2021-01-10T05:11:05.688731
| 2019-03-20T04:59:10
| 2019-03-20T04:59:10
| 49,176,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
__author__ = 'sriram'
"""
Problem from: HackerRank
Domain: Algorithms
Name: Utopian Tree
"""
def get_height(seasons, initial_height=1):
for i in xrange(1, (seasons+1), 1):
if i % 2 == 0:
initial_height += 1
else:
initial_height *= 2
return initial_height
def main():
t = int(raw_input().strip())
for a0 in xrange(t):
n = int(raw_input().strip())
print get_height(seasons=n)
if __name__ == '__main__':
main()
|
[
"srignsh22@gmail.com"
] |
srignsh22@gmail.com
|
92d61573f163cc264a1c3357554348a137fda00e
|
cfc9bb332f6c18c52f941aa4919e80a736b33453
|
/code/set_1_array/287_find_the_duplicate_number.py
|
897e247c171be6235bb10e77d7132c6e689772ae
|
[] |
no_license
|
JagritiG/interview-questions-answers-python
|
8992c64b754d81c76f4d2d29f92fbd9abe522a15
|
411536a94d4a2f9a64e4f06a41dc8aef4111e80f
|
refs/heads/master
| 2022-11-23T21:38:53.986360
| 2020-08-02T22:17:13
| 2020-08-02T22:17:13
| 267,738,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,532
|
py
|
# Find the Duplicate Number
# Given an array nums containing n + 1 integers where each integer is between 1 and n (inclusive),
# prove that at least one duplicate number must exist. Assume that there is only one duplicate number,
# find the duplicate one.
# Example 1:
# Input: [1,3,4,2,2]
# Output: 2
# Example 2:
# Input: [3,1,3,4,2]
# Output: 3
# Note:
# You must not modify the array (assume the array is read only).
# You must use only constant, O(1) extra space.
# Your runtime complexity should be less than O(n2).
# There is only one duplicate number in the array, but it could be repeated more than once.
# =======================================================================================================
# Algorithm:
# Traverse the array --> for each index, perform the following steps:
# 1. at index i of an array nums, if nums[abs(nums[i]) >= 0, make it negative
# 2. at index i of an array nums, if nums[abs(nums[i]) < 0, (negative means the value already encountered),
# return the absolute value of the element at index i --> abs(nums[i]) <-- result
# TC: O(n)
# SC: O(1)
# =======================================================================================================
def find_duplicate(nums):
for i in range(len(nums)):
if nums[abs(nums[i])] < 0:
return abs(nums[i])
else:
nums[abs(nums[i])] *= -1
if __name__ == "__main__":
# inputs = [1, 3, 4, 2, 2] # output: 2
inputs = [3, 1, 3, 4, 2] # output: 3
print(find_duplicate(inputs))
|
[
"jagritigoswami84@gmail.com"
] |
jagritigoswami84@gmail.com
|
20c60c4ab3ffcaef59f386754bf8ec8172462fd8
|
404fafd24140a474b868a3f19681ffae80f3cef6
|
/oregoninvasiveshotline/reports/search_indexes.py
|
787f067cc9ac921ecf001df3ed123fe526b6738d
|
[] |
no_license
|
wylee/oregoninvasiveshotline
|
50590c2684c4445c58574e773d47936cbccb2d47
|
221f4c5f0307d0e5ffd3f46b8048b5a826388f98
|
refs/heads/develop
| 2020-12-03T09:11:14.606417
| 2017-04-14T17:22:46
| 2017-04-14T17:56:02
| 43,166,993
| 0
| 0
| null | 2015-09-25T18:06:50
| 2015-09-25T18:06:49
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,198
|
py
|
from haystack import indexes
from .models import Report
class ReportIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
report_id = indexes.IntegerField(model_attr='report_id', boost=1.125)
title = indexes.CharField(model_attr='title', boost=1.125)
description = indexes.CharField(model_attr='description')
location = indexes.CharField(model_attr='location')
edrr_status = indexes.CharField(model_attr='get_edrr_status_display')
ofpd = indexes.BooleanField(model_attr='created_by__has_completed_ofpd')
category = indexes.CharField(model_attr='category__name', boost=1.0625)
category_id = indexes.IntegerField(model_attr='category__pk')
claimed_by = indexes.CharField(model_attr='claimed_by', null=True)
claimed_by_id = indexes.IntegerField(model_attr='claimed_by__user_id', null=True)
county = indexes.CharField(model_attr='county__name', null=True, boost=1.0625)
county_id = indexes.IntegerField(model_attr='county__pk', null=True)
created_by = indexes.CharField(model_attr='created_by')
created_by_id = indexes.IntegerField(model_attr='created_by__user_id')
created_on = indexes.DateTimeField(model_attr='created_on')
is_archived = indexes.BooleanField(model_attr='is_archived', default=False)
is_public = indexes.BooleanField(model_attr='is_public', default=False)
species_id = indexes.CharField(model_attr='species__pk', null=True)
species = indexes.CharField(model_attr='species__title', null=True, boost=1.0625)
reported_species = indexes.CharField(model_attr='reported_species__name', null=True)
actual_species = indexes.CharField(model_attr='actual_species__name', null=True)
# Non-indexed fields (i.e., fields that we don't search on but that
# we want available in search results).
icon_url = indexes.CharField(model_attr='icon_url', indexed=False)
image_url = indexes.CharField(model_attr='image_url', default=None, indexed=False)
lat = indexes.FloatField(model_attr='point__y', indexed=False)
lng = indexes.FloatField(model_attr='point__x', indexed=False)
def get_model(self):
return Report
|
[
"wbaldwin@pdx.edu"
] |
wbaldwin@pdx.edu
|
b5bd8cc19f966c69bb896f02648f30ff4beea112
|
fbfc0e4d72e2d42b079804775f717833b946fab5
|
/conda_build/main_index.py
|
e64aefc6bba616b7a323c37c83953a0de17cb27e
|
[] |
no_license
|
minrk/conda-build
|
a13eb0a6ebc3a66e276b1ab20ffa97b035434c71
|
71b561831665ca7b8c906f87789ea8ffc38c6ce5
|
refs/heads/master
| 2023-06-08T09:51:37.787222
| 2014-02-13T23:48:42
| 2014-02-13T23:48:42
| 16,822,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
from __future__ import print_function, division, absolute_import
import os
from os.path import abspath
from optparse import OptionParser
from conda_build.index import update_index
def main():
p = OptionParser(
usage="usage: %prog [options] DIR [DIR ...]",
description="display useful information about tar files")
p.add_option('-f', "--force",
action = "store_true",
help = "force reading all files")
p.add_option('-q', "--quiet",
action = "store_true")
opts, args = p.parse_args()
if len(args) == 0:
dir_paths = [os.getcwd()]
else:
dir_paths = [abspath(path) for path in args]
for path in dir_paths:
update_index(path, verbose=not opts.quiet, force=opts.force)
if __name__ == '__main__':
main()
|
[
"ilanschnell@gmail.com"
] |
ilanschnell@gmail.com
|
51b9750566b082570f8361843c139be599d70dc7
|
32ac0ae3eea0d8d8fd60ddee956c6ef864f7a8ae
|
/oop_examples.py
|
51c58bda09af0e691e691f2275baa0428605164a
|
[] |
no_license
|
EricSchles/oop_py
|
1867778c70597a7d91da256be0cf93017e4627df
|
52e30b0e804e4fc925935f95357cedb58da7d06c
|
refs/heads/master
| 2016-09-06T16:37:35.052549
| 2015-07-28T21:30:54
| 2015-07-28T21:30:54
| 39,858,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,554
|
py
|
class Node:
def __init__(self,data,next=None):
self.data = data
self.next = next
def __str__(self):
return repr(self.data)
class Vertex(Node):
def __init__(self,data,edges=[]):
self.data = data
self.edges = edges
def __eq__(self,other):
if isinstance(other,self.__class__):
return self.data == other.data
elif type(self.data) == type(other):
return self.data == other
else:
return False
def __ne__(self,other):
return not self.__eq__(other)
class Graph:
def __init__(self):
self.vertices = []
self.edge_list = [] #a list of dictionaries
def print_nodes(self):
for v in self.vertices:
print v
def print_edges(self):
for pair in self.edge_list:
print pair
def add_node(self,vertex):
v = Vertex(vertex)
self.vertices.append(v)
self.vertices.sort()
def add_edge(self,vertex1,vertex2):
if not vertex1 in self.vertices:
self.add_node(vertex1)
if not vertex2 in self.vertices:
self.add_node(vertex2)
v1 = Vertex(vertex1)
v2 = Vertex(vertex2)
v1.edges.append(v2)
v2.edges.append(v1)
self.edge_list.append({vertex1:vertex2})
self.edge_list.append({vertex2:vertex1})
if __name__ == '__main__':
g = Graph()
g.add_node(5)
g.add_node(7)
g.add_edge(5,7)
g.print_nodes()
g.print_edges()
|
[
"ericschles@gmail.com"
] |
ericschles@gmail.com
|
45580a16f6a18dad43e707e5d56a5ccb7cd5c775
|
ced1068f3cbab76399490b5e1b2e7c496555639c
|
/pslist2.py
|
9f6a763d1d733c047656acd196021f722920f3de
|
[] |
no_license
|
ravijaya/july23
|
744d5e88ed5ab414a1097a107ef9577664da8b73
|
e29ca509ac2bb38a3ddc93e2185daf54832722c3
|
refs/heads/master
| 2020-06-23T01:04:50.985147
| 2019-07-24T11:43:52
| 2019-07-24T11:43:52
| 198,453,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
# delete by index
items = [2.2, 'pam', .98, 'tim', 'pat', 1.2, 'kim', .67]
print(items)
print()
value = items.pop()
print(value)
print(items)
print()
items = [2.2, 'pam', .98, 'tim', 'pat', 1.2, 'kim', .67]
print(items)
print()
value = items.pop(5)
print(value)
print(items)
print()
|
[
"ravijaya@localhost.localdomain"
] |
ravijaya@localhost.localdomain
|
e6946326b32ac4520b6a43c8ce3fbe9617677612
|
f3b5c4a5ce869dee94c3dfa8d110bab1b4be698b
|
/tools/sandesh/library/common/test/SConscript
|
96a9520dcf0fe554cf04fa620990b5020f8b8421
|
[
"Apache-2.0"
] |
permissive
|
pan2za/ctrl
|
8f808fb4da117fce346ff3d54f80b4e3d6b86b52
|
1d49df03ec4577b014b7d7ef2557d76e795f6a1c
|
refs/heads/master
| 2021-01-22T23:16:48.002959
| 2015-06-17T06:13:36
| 2015-06-17T06:13:36
| 37,454,161
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
# -*- mode: python; -*-
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
Import('SandeshEnv')
env = SandeshEnv.Clone()
env.Append(CPPPATH = [
Dir('#build/include').abspath,
Dir('#controller/src').abspath,
Dir('#tools').abspath,
])
SandeshBufferTestGenCppFiles = env.SandeshGenCpp('sandesh_buffer_test.sandesh')
SandeshBufferTestGenCppSrcs = env.ExtractCpp(SandeshBufferTestGenCppFiles)
SandeshBufferTestGenCppObjs = env.Object(SandeshBufferTestGenCppSrcs)
SandeshBufferTestGenCFiles = env.SandeshGenC('sandesh_buffer_test.sandesh')
SandeshBufferTestGenCSrcs = env.ExtractC(SandeshBufferTestGenCFiles)
SandeshBufferTestGenCObjs = env.Object(SandeshBufferTestGenCSrcs)
SandeshEnv['SandeshBufferTestGenCppObjs'] = SandeshBufferTestGenCppObjs
SandeshEnv['SandeshBufferTestGenCObjs'] = SandeshBufferTestGenCObjs
|
[
"pan2za@live.com"
] |
pan2za@live.com
|
|
887e2b6ff01a0510044a6bf19ef7078447cafaab
|
d063684dd03293eb0f980568af088d26ab087dbe
|
/debadmin/migrations/0093_user_addon_cart_item_cart_id.py
|
e3e4b69f8eb6eac7dc10b7c7fc76391b4bee6345
|
[] |
no_license
|
abhaysantra/debscientific
|
ce88e5ef44da8d6771c3652ed0ad02900ccd8ed2
|
88ec65616fd24052bbdbba8b00beba85493f5aea
|
refs/heads/master
| 2020-11-26T22:09:33.820247
| 2019-12-20T07:58:43
| 2019-12-20T07:58:43
| 229,213,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
# Generated by Django 2.2.6 on 2019-11-27 08:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('debadmin', '0092_user_addon_cart_item'),
]
operations = [
migrations.AddField(
model_name='user_addon_cart_item',
name='cart_id',
field=models.IntegerField(null=True),
),
]
|
[
"abhay.santra@gmail.com"
] |
abhay.santra@gmail.com
|
35a67b282264036588f8a8fee11b90cbf8a73faf
|
0e478f3d8b6c323c093455428c9094c45de13bac
|
/src/OTLMOW/OTLModel/Classes/Installatie/Ecovallei.py
|
2b15e33f943ef57001d14f6a606039fb858c4be9
|
[
"MIT"
] |
permissive
|
davidvlaminck/OTLMOW
|
c6eae90b2cab8a741271002cde454427ca8b75ba
|
48f8c357c475da1d2a1bc7820556843d4b37838d
|
refs/heads/main
| 2023-01-12T05:08:40.442734
| 2023-01-10T15:26:39
| 2023-01-10T15:26:39
| 432,681,113
| 3
| 1
|
MIT
| 2022-06-20T20:36:00
| 2021-11-28T10:28:24
|
Python
|
UTF-8
|
Python
| false
| false
| 642
|
py
|
# coding=utf-8
from OTLMOW.OTLModel.Classes.ImplementatieElement.AIMObject import AIMObject
from OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Ecovallei(AIMObject, VlakGeometrie):
"""Een vallei onder de verkeersbrug waar het landschap gewoon onderdoor loopt en minimaal wordt verstoord."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/installatie#Ecovallei'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
AIMObject.__init__(self)
VlakGeometrie.__init__(self)
|
[
"david.vlaminck@mow.vlaanderen.be"
] |
david.vlaminck@mow.vlaanderen.be
|
b70d805e1f5ae369892c503ffe7cb69f881a3ed5
|
c78b20665068e712917558dbdd512641b0b90c80
|
/rebecca/fanstatic/apis.py
|
1cf4b8101d5b6c332c1f6db3af54c37e02c1f3c1
|
[
"MIT"
] |
permissive
|
rebeccaframework/rebecca.fanstatic
|
9d60cb29f4bee5e5a1dbc8cfcc43e3ac4c723c43
|
9f71d5f7d0a605b0c9ad165b20958d88cfdbcf69
|
refs/heads/master
| 2016-09-06T14:08:34.278688
| 2012-03-20T08:23:42
| 2012-03-20T08:23:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,464
|
py
|
from fanstatic import get_library_registry
from zope.interface import implementer
from .interfaces import IFanstaticSet
def get_fanstatic_set(request):
reg = request.registry
fanstatic_set = reg.queryUtility(IFanstaticSet)
return fanstatic_set
def need_fanstatic(request, renderer_name):
fanstatic_set = get_fanstatic_set(request)
if fanstatic_set is None:
return
fanstatic_set(renderer_name)
class FanstaticSet(object):
def __init__(self):
self.fanstatics = []
def add_fanstatic(self, resources, renderer_name_pattern):
self.fanstatics.append(Fanstatic(resources, renderer_name_pattern))
def __call__(self, renderer_name):
for f in self.fanstatics:
f(renderer_name)
def iter_resources(self):
printed = set()
for f in self.fanstatics:
for r in f.resources:
lib = r.library
if lib not in printed:
yield lib
printed.add(lib)
class Fanstatic(object):
def __init__(self, resources, renderer_name_regex):
self.resources = resources
self.regex = renderer_name_regex
def match(self, renderer_name):
return self.regex.match(renderer_name)
def __call__(self, renderer_name):
if self.match(renderer_name):
for resource in self.resources:
from fanstatic.core import get_needed
resource.need()
|
[
"aodagx@gmail.com"
] |
aodagx@gmail.com
|
49c1a6ffb77eb42628a14c31a91f6da58b557a6d
|
10e8fa6e43a54b3bbb89326a7d5786d50a625551
|
/04. Inheritance/venv/Scripts/pip3.8-script.py
|
49afacc31ed499f32cbf17a0824ad4d92a2dd157
|
[] |
no_license
|
ramona-2020/Python-OOP
|
cbc7e5fadfdc907e51c83313e0ffb1f4f5f83f70
|
7404908f50d30c533f0fca2fd08d0290526686a5
|
refs/heads/master
| 2023-03-20T18:43:18.389720
| 2020-06-07T15:20:00
| 2020-06-07T15:20:00
| 523,400,905
| 1
| 0
| null | 2022-08-10T15:38:09
| 2022-08-10T15:38:08
| null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
#!"D:\User\Desktop\Python Projects_SoftUni\Python-OOP\04. Inheritance\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
|
[
"rossavelrs@yahoo.com"
] |
rossavelrs@yahoo.com
|
6f2cafd43a0d793c14a963dee3e2c6ede3bc62b3
|
a88207cf67ad0d65afdd57d5f5144cbc09995f60
|
/test/pPb/step2_RAW2DIGI_L1Reco_RECO.py
|
bd20aa632197a2c164cdf5066199385358db4aa7
|
[] |
no_license
|
pfs/TopFromHeavyIons
|
211184bad34e4ae11e6216689e5141a132e14542
|
a75ed1fc68d24682dad3badacf2726dc2b7ff464
|
refs/heads/master
| 2020-12-29T02:44:30.348481
| 2017-06-16T14:53:35
| 2017-06-16T14:53:35
| 37,029,781
| 1
| 1
| null | 2015-10-11T08:28:35
| 2015-06-07T19:47:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,477
|
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step2 --filein file:step1.root --fileout file:step2.root --mc --eventcontent AODSIM --datatier AODSIM --conditions 80X_mcRun2_pA_v4 --customise_commands process.bunchSpacingProducer.bunchSpacingOverride=cms.uint32(25)\n process.bunchSpacingProducer.overrideBunchSpacing=cms.bool(True) -n -1 --step RAW2DIGI,L1Reco,RECO --era Run2_2016_pA
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('RECO',eras.Run2_2016_pA)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:step1.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('step2 nevts:-1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.AODSIMoutput = cms.OutputModule("PoolOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(4),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('AODSIM'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(15728640),
fileName = cms.untracked.string('file:step2.root'),
outputCommands = process.AODSIMEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '80X_mcRun2_pA_v4', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1Reco_step = cms.Path(process.L1Reco)
process.reconstruction_step = cms.Path(process.reconstruction)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.AODSIMoutput_step = cms.EndPath(process.AODSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.endjob_step,process.AODSIMoutput_step)
# Customisation from command line
process.bunchSpacingProducer.bunchSpacingOverride=cms.uint32(25)
process.bunchSpacingProducer.overrideBunchSpacing=cms.bool(True)
# Customisation from command line
process.bunchSpacingProducer.bunchSpacingOverride=cms.uint32(25)
process.bunchSpacingProducer.overrideBunchSpacing=cms.bool(True)
|
[
"psilva@cern.ch"
] |
psilva@cern.ch
|
457d08945aaab8688e20d9b67d9f662e622f45c7
|
05083d24088bbb3bfb7cdd162c101c72e18bc3a6
|
/containers/failures/router/failure1/myapp.py
|
91a0fcf8f0bc567cc5d9e9642a2f9910e038d684
|
[
"Apache-2.0"
] |
permissive
|
crossbario/crossbar-examples
|
f5e14b62db0f14e20ab54346cd4e8c3276aa6449
|
aa31d9fe3abcb4b797931356b5a2ceeac64229c3
|
refs/heads/master
| 2023-01-11T02:36:00.883034
| 2023-01-03T11:12:06
| 2023-01-03T11:12:06
| 28,035,551
| 100
| 122
|
Apache-2.0
| 2023-01-03T11:12:07
| 2014-12-15T12:23:02
|
HTML
|
UTF-8
|
Python
| false
| false
| 371
|
py
|
from twisted.logger import Logger
from autobahn.twisted.wamp import ApplicationSession
class MySession(ApplicationSession):
log = Logger()
def __init__(self, config):
self.log.info("MySession.__init__()")
ApplicationSession.__init__(self, config)
@inlineCallbacks
def onJoin(self, details):
self.log.info("MySession.onJoin()")
|
[
"tobias.oberstein@tavendo.de"
] |
tobias.oberstein@tavendo.de
|
45aeeec4bced02a189d55b9a5a3b75962906bfd3
|
457e2f5b2a26877df739e314ec1560e8a3ecfb97
|
/rebind/baseRig/util/nodePVpos.py
|
c0fe805be2194bfad4641759e25560502c3f02dd
|
[] |
no_license
|
mappp7/tools
|
f6685d9a682bd540d59c1bff0cebb60f79fd6556
|
c537e7648112c51ba4f44225418e773ee6b8be6c
|
refs/heads/master
| 2021-01-14T16:40:44.450790
| 2020-10-30T05:30:27
| 2020-10-30T05:30:27
| 242,682,763
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,772
|
py
|
import maya.cmds as cmds
import util.homeNul as HN
# Node base PV position !
# 3 joint list
#jnt = cmds.ls( sl=True, type='joint' )
def nodePVconnect( jnt ):
#decomposeMatrix
rootJdecompose = cmds.createNode( 'decomposeMatrix', n=jnt[0].replace( 'JNT', 'DCM' ) )
middleJdecompose = cmds.createNode( 'decomposeMatrix', n=jnt[1].replace( 'JNT', 'DCM' ) )
tipJdecompose = cmds.createNode( 'decomposeMatrix', n=jnt[2].replace( 'JNT', 'DCM' ) )
#connections
cmds.connectAttr( '%s.worldMatrix[0]' % jnt[0], '%s.inputMatrix' % rootJdecompose )
cmds.connectAttr( '%s.worldMatrix[0]' % jnt[1], '%s.inputMatrix' % middleJdecompose )
cmds.connectAttr( '%s.worldMatrix[0]' % jnt[2], '%s.inputMatrix' % tipJdecompose )
#plusMinusAverage
sumPMA = cmds.createNode( 'plusMinusAverage', n='%sPos_sum_%sPos_PMA' % ( jnt[0].replace( '_JNT', '' ), jnt[2].replace( '_JNT', '' ) ) )
cmds.setAttr( '%s.operation' % sumPMA, 1 )
#connections
cmds.connectAttr( '%s.outputTranslate' % rootJdecompose, '%s.input3D[0]' % sumPMA )
cmds.connectAttr( '%s.outputTranslate' % tipJdecompose, '%s.input3D[1]' % sumPMA )
#multiplyDivide
divideSumPMA = cmds.createNode( 'multiplyDivide', n=sumPMA.replace( 'PMA', 'halfDvide_MPD' ) )
cmds.setAttr( '%s.operation' % divideSumPMA, 2 )
cmds.setAttr( '%s.input2X' % divideSumPMA, 2 )
cmds.setAttr( '%s.input2Y' % divideSumPMA, 2 )
cmds.setAttr( '%s.input2Z' % divideSumPMA, 2 )
#connections
cmds.connectAttr( '%s.output3D' % sumPMA, '%s.input1' % divideSumPMA )
#plusMinusAverage( calculate vector )
VT = cmds.createNode( 'plusMinusAverage', n='to_%s_vector_PMA' % jnt[1].replace( 'JNT', 'joint' ) )
cmds.setAttr( '%s.operation' % VT, 2 )
#connections
cmds.connectAttr( '%s.outputTranslate' % middleJdecompose, '%s.input3D[0]' % VT )
cmds.connectAttr( '%s.output' % divideSumPMA, '%s.input3D[1]' % VT )
# offset
offsetVector = cmds.createNode( 'multiplyDivide', n='%s_MPD' % VT.replace( 'PMA', 'offset' ) )
cmds.connectAttr( '%s.output3D' % VT, '%s.input1' % offsetVector )
#plusMinusAverage( middleJ + offset + vector )
PVposition = cmds.createNode( 'plusMinusAverage', n='%s_vector_PMA' % divideSumPMA.replace( 'MPD', 'sum' ) )
cmds.setAttr( '%s.operation' % PVposition, 1 )
#connections
cmds.connectAttr( '%s.output' % divideSumPMA, '%s.input3D[0]' % PVposition )
cmds.connectAttr( '%s.output' % offsetVector, '%s.input3D[1]' % PVposition )
# finish
loc = cmds.spaceLocator( n=jnt[1].replace( 'JNT', 'pv_LOC' ) )
cmds.connectAttr( '%s.output3D' % PVposition, '%s.translate' % loc[0] )
homeN = HN.homeNul( loc[0] )
return homeN
|
[
"56536931+mappp7@users.noreply.github.com"
] |
56536931+mappp7@users.noreply.github.com
|
8a351a0a17dd9c9e820ad0a3ce4ed47c32bbad79
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/CMDy4pvnTZkFwJmmx_17.py
|
eb2c62d15f7a683e86ff3af378adbe04730786b4
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,319
|
py
|
"""
Create a class `Sudoku` that takes a **string** as an argument. The string
will contain the numbers of a regular 9x9 sudoku board **left to right and top
to bottom** , with zeros filling up the empty cells.
### Attributes
An instance of the class `Sudoku` will have one attribute:
* `board`: a list representing the board, with sublits for each **row** , with the numbers as **integers**. Empty cell represented with `0`.
### Methods
An instance of the class `Sudoku` wil have three methods:
* `get_row(n)`: will return the row in position `n`.
* `get_col(n)`: will return the column in position `n`.
* `get_sqr([n, m])`: will return the square in position `n` if only one argument is given, and the square to which the cell in position `(n, m)` belongs to if two arguments are given.
### Example

game = Sudoku("417950030000000700060007000050009106800600000000003400900005000000430000200701580")
game.board ➞ [
[4, 1, 7, 9, 5, 0, 0, 3, 0],
[0, 0, 0, 0, 0, 0, 7, 0, 0],
[0, 6, 0, 0, 0, 7, 0, 0, 0],
[0, 5, 0, 0, 0, 9, 1, 0, 6],
[8, 0, 0, 6, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 3, 4, 0, 0],
[9, 0, 0, 0, 0, 5, 0, 0, 0],
[0, 0, 0, 4, 3, 0, 0, 0, 0],
[2, 0, 0, 7, 0, 1, 5, 8, 0]
]
game.get_row(0) ➞ [4, 1, 7, 9, 5, 0, 0, 3, 0]
game.get_col(8) ➞ [0, 0, 0, 6, 0, 0, 0, 0, 0]
game.get_sqr(1) ➞ [9, 5, 0, 0, 0, 0, 0, 0, 7]
game.get_sqr(1, 8) ➞ [0, 3, 0, 7, 0, 0, 0, 0, 0]
game.get_sqr(8, 3) ➞ [0, 0, 5, 4, 3, 0, 7, 0, 1]
### Notes
* All positions are indexed to 0.
* All orders are assigned left to right and top to bottom.
"""
class Sudoku:
def __init__(self, board):
self.board = [[int(board[r*9+c]) for c in range(9)] for r in range(9)]
def get_row(self, n):
return self.board[n]
def get_col(self, n):
return [row[n] for row in self.board]
def get_sqr(self, n, m=None):
if m == None:
r, c = (n//3)*3, (n%3)*3
else:
r, c = (n//3)*3, (m//3)*3
return [self.board[r][c], self.board[r][c+1], self.board[r][c+2],
self.board[r+1][c], self.board[r+1][c+1], self.board[r+1][c+2],
self.board[r+2][c], self.board[r+2][c+1], self.board[r+2][c+2]]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
2dc0161a4729cda94a80a95977ad4a8515d70974
|
938a496fe78d5538af94017c78a11615a8498682
|
/algorithms/901-/1104.path-in-zigzag-labelled-binary-tree.py
|
2b2367b8413873d7a0cf319fc2d88320522f4c2d
|
[] |
no_license
|
huilizhou/Leetcode-pyhton
|
261280044d15d0baeb227248ade675177efdb297
|
6ae85bf79c5a21735e3c245c0c256f29c1c60926
|
refs/heads/master
| 2020-03-28T15:57:52.762162
| 2019-11-26T06:14:13
| 2019-11-26T06:14:13
| 148,644,059
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
# 二叉树寻路
class Solution(object):
def pathInZigZagTree(self, label):
"""
:type label: int
:rtype: List[int]
"""
"""
我们会发现一个规律,在偶数行,原索引和逆序后的索引值加在一起,
等于该行最小索引和最大索引的值(因为每一行都是一个等差数列),
而这个值也恰好等于该行最小索引值的3倍减去1(因为下一行开始的索引是前一行开始索引的2倍)。
"""
if label == 1:
return [label]
res = [label]
while label > 1:
res.append(label // 2)
label //= 2
res.reverse()
for i in range(1, len(res) - 1):
if (i + 1) % 2 != len(res) % 2:
res[i] = (3 * (2**i)) - 1 - res[i]
return res
print(Solution().pathInZigZagTree(14))
|
[
"2540278344@qq.com"
] |
2540278344@qq.com
|
727b274021936dbec7e423339760a2a165e22cd7
|
a70e4ba37ff2267b23a4d70282577f03086ab98d
|
/setup.py
|
0cd40d03a9499c1619570decacc6446dafc521f1
|
[
"MIT"
] |
permissive
|
i5o/xo-retroscope
|
b731b8511054a2b8144e85a9b545dea8d02d494b
|
0e61b8eb41828356e6a49402f1bdb93c285486f4
|
refs/heads/master
| 2016-09-05T14:58:04.088218
| 2014-01-03T19:39:47
| 2014-01-03T19:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
#!/usr/bin/env python
try:
from sugar3.activity import bundlebuilder
bundlebuilder.start()
except ImportError:
print "Error: sugar.activity.Bundlebuilder not found."
|
[
"ignacio@sugarlabs.org"
] |
ignacio@sugarlabs.org
|
01bf3ba6e3fdeb2cfdc75acfd7cae65d5ce05eba
|
15cb0ddd678abe1e1f7a905fab0305079bfc4007
|
/source/vsm-dashboard/vsm_dashboard/dashboards/vsm/monitor-status/tables.py
|
785a28bfaa382c1d8a76a769ccba4105a78ac32b
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
ramkrsna/virtual-storage-manager
|
3563baf9763a0925af77cc13245e0896c20a2ced
|
78125bfb4dd4d78ff96bc3274c8919003769c545
|
refs/heads/master
| 2023-02-18T08:52:56.769486
| 2016-07-01T06:46:53
| 2016-07-01T06:46:53
| 63,155,952
| 0
| 0
|
NOASSERTION
| 2023-02-07T06:07:38
| 2016-07-12T12:27:16
|
Python
|
UTF-8
|
Python
| false
| false
| 4,595
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Intel Corporation, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
from django.utils.datastructures import SortedDict
from django import forms
from django.utils.safestring import mark_safe
from horizon import tables
from horizon.utils import html
from horizon import exceptions
from vsm_dashboard.api import vsm as vsmapi
from .utils import checkbox_transform
STRING_SEPARATOR = "__"
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, id):
# todo update zone info in apiclient CEPH_LOG
try:
_zones = vsmapi.get_zone_list(request,)
except:
exceptions.handle(request,
_('Unable to retrieve sever list. '))
zones = {}
for _zone in _zones:
zones.setdefault(_zone.id, _zone.name)
_server = vsmapi.get_server(request, id)
server = {"id": _server.id,
"name": _server.host,
"primary_public_ip": _server.primary_public_ip,
"secondary_public_ip": _server.secondary_public_ip,
"cluster_ip": _server.cluster_ip,
"zone_id": _server.zone_id,
"zone": "",
"osds": _server.osds,
"type": _server.type,
"status": _server.status}
if "monitor" in _server.type:
server['is_monitor'] = "yes"
else:
server['is_monitor'] = "no"
if _server.zone_id in zones:
server['zone'] = zones[_server.zone_id]
return server
STATUS_DISPLAY_CHOICES = (
("resize", "Resize/Migrate"),
("verify_resize", "Confirm or Revert Resize/Migrate"),
("revert_resize", "Revert Resize/Migrate"),
)
class ListMonitorStatusTable(tables.DataTable):
STATUS_CHOICES = (
("active", True),
("available", True),
("Active", True),
)
#server_id = tables.Column("id", verbose_name=_("ID"))
ordinal = tables.Column("id", verbose_name=_("ordinal"))
name = tables.Column("name", verbose_name=_("Name"))
address = tables.Column("address", verbose_name=_("Address"))
health = tables.Column("health", verbose_name=_("Health"))
details = tables.Column("details", verbose_name=_("Detail"))
skew = tables.Column("skew", verbose_name=_("Skew"))
latency = tables.Column("latency", verbose_name=_("Latency"))
kb_total = tables.Column("mb_total", verbose_name=_("MB Total (disk)"))
kb_used = tables.Column("mb_used", verbose_name=_("MB Used (disk)"))
kb_avail = tables.Column("mb_avail", verbose_name=_("MB Available (disk)"))
percent_avail = tables.Column("percent_avail", verbose_name=_("Percent Available"))
updated_at = tables.Column("updated_at", verbose_name=_("Updated at"), classes=("span2",))
class Meta:
name = "monitor_list"
verbose_name = _("Monitor List")
#status_columns = ['status']
row_class = UpdateRow
multi_select = False
def get_object_id(self, datum):
if hasattr(datum, "id"):
return datum.id
else:
return datum["id"]
def get_object_display(self, datum):
if hasattr(datum, "name"):
return datum.id
else:
return datum["name"]
def empty_value_maker(type, name, value, attrs=None):
def _empty_value_caller(datum):
if type == "text":
widget = forms.TextInput()
elif type == "choice":
widget = forms.ChoiceField().widget
elif type == "checkbox":
widget = forms.CheckboxInput()
data = dict(name=name, value=value)
if name in datum.keys():
data.update(datum[name])
if attrs:
data.update(dict(attrs=attrs))
data = widget.render(**data)
return data
return _empty_value_caller
|
[
"yaguang.wang@intel.com"
] |
yaguang.wang@intel.com
|
75aa59ccac8e96f75f760fc5720ee19d5dbb3fc4
|
26771494974942f4ab18d2cd8247506c344e1d14
|
/133-v2-cloneGraph.py
|
da32838a0365c7696c37221dfa93c8d27f7d203e
|
[] |
no_license
|
wangyunpengbio/LeetCode
|
9f4c6076e067c5e847d662679483f737d40e8ca5
|
cec1fd11fe43177abb2d4236782c0f116e6e8bce
|
refs/heads/master
| 2020-04-29T22:28:25.899420
| 2020-04-03T07:37:26
| 2020-04-03T07:37:26
| 176,448,957
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,326
|
py
|
"""
# Definition for a Node.
class Node:
def __init__(self, val, neighbors):
self.val = val
self.neighbors = neighbors
"""
class Solution:
def cloneGraph(self, node: 'Node') -> 'Node':
# 深度优先遍历 非递归
if node == None :return None
resNode2CopyNode = {}
stack = [node]
copy = Node(node.val,None)
resNode2CopyNode[node] = copy
while stack:
current = stack.pop()
neighbors = current.neighbors
if neighbors == None:continue # 原来图里该节点就没有邻居,直接跳过
copyNode = resNode2CopyNode[current]
if copyNode.neighbors == None:
copyNode.neighbors = []
# 遍历当前节点的全部邻居,把“当前节点的拷贝”的邻居list也拷贝好,遇到新邻居:创建新节点,新节点放到stack中;遇到旧邻居:直接从dic中拿节点
for nei in neighbors:
if nei in resNode2CopyNode:
copyneighbor = resNode2CopyNode[nei]
else:
copyneighbor = Node(nei.val,None)
resNode2CopyNode[nei] = copyneighbor
stack.append(nei)
copyNode.neighbors.append(copyneighbor)
return copy
|
[
"wangyunpeng_bio@qq.com"
] |
wangyunpeng_bio@qq.com
|
0f61efa724ff6f8c229649cf3b50c92d8bd7b5b1
|
7ba22c9826a1574777a08fb634ff15c56de6cb98
|
/syntaxnet/dragnn/tools/evaluator.py
|
ae60e5d4f8beeb1996dd1633c94b9a5e2710e180
|
[] |
no_license
|
dhanya1/full_cyclist
|
02b85b8331f8ca9364169484ab97b32920cbbd14
|
dd12c8d8a3deaaea15041e54f2e459a5041f11c2
|
refs/heads/master
| 2022-10-17T13:36:51.886476
| 2018-07-30T15:46:02
| 2018-07-30T15:46:02
| 142,896,293
| 0
| 1
| null | 2022-10-05T10:11:01
| 2018-07-30T15:46:15
|
Python
|
UTF-8
|
Python
| false
| false
| 6,917
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs a DRAGNN model on a given set of CoNLL-formatted sentences.
Sample invocation:
bazel run -c opt <...>:evaluator -- \
--master_spec="/path/to/master-spec" \
--checkpoint_file="/path/to/model/name.checkpoint" \
--input_file="/path/to/input/documents/test.connlu"
"""
import os
import re
import time
from absl import flags
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.client import timeline
from tensorflow.python.platform import gfile
from dragnn.protos import spec_pb2
from dragnn.python import evaluation
from dragnn.python import graph_builder
from dragnn.python import sentence_io
from dragnn.python import spec_builder
from syntaxnet import sentence_pb2
FLAGS = flags.FLAGS
flags.DEFINE_string('master_spec', '',
'Path to text file containing a DRAGNN master spec to run.')
flags.DEFINE_string('resource_dir', '',
'Optional base directory for resources in the master spec.')
flags.DEFINE_bool('complete_master_spec', False, 'Whether the master_spec '
'needs the lexicon and other resources added to it.')
flags.DEFINE_string('checkpoint_file', '', 'Path to trained model checkpoint.')
flags.DEFINE_string('input_file', '',
'File of CoNLL-formatted sentences to read from.')
flags.DEFINE_string('output_file', '',
'File path to write annotated sentences to.')
flags.DEFINE_integer('max_batch_size', 2048, 'Maximum batch size to support.')
flags.DEFINE_string('inference_beam_size', '', 'Comma separated list of '
'component_name=beam_size pairs.')
flags.DEFINE_string('locally_normalize', '', 'Comma separated list of '
'component names to do local normalization on.')
flags.DEFINE_integer('threads', 10, 'Number of threads used for intra- and '
'inter-op parallelism.')
flags.DEFINE_string('timeline_output_file', '', 'Path to save timeline to. '
'If specified, the final iteration of the evaluation loop '
'will capture and save a TensorFlow timeline.')
flags.DEFINE_string('log_file', '', 'File path to write parser eval_bkp results.')
flags.DEFINE_string('language_name', '_', 'Name of language being parsed, '
'for logging.')
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Parse the flags containint lists, using regular expressions.
# This matches and extracts key=value pairs.
component_beam_sizes = re.findall(r'([^=,]+)=(\d+)',
FLAGS.inference_beam_size)
# This matches strings separated by a comma. Does not return any empty
# strings.
components_to_locally_normalize = re.findall(r'[^,]+',
FLAGS.locally_normalize)
# Reads master spec.
master_spec = spec_pb2.MasterSpec()
with gfile.FastGFile(FLAGS.master_spec) as fin:
text_format.Parse(fin.read(), master_spec)
# Rewrite resource locations.
if FLAGS.resource_dir:
for component in master_spec.component:
for resource in component.resource:
for part in resource.part:
part.file_pattern = os.path.join(FLAGS.resource_dir,
part.file_pattern)
if FLAGS.complete_master_spec:
spec_builder.complete_master_spec(master_spec, None, FLAGS.resource_dir)
# Graph building.
tf.logging.info('Building the graph')
g = tf.Graph()
with g.as_default(), tf.device('/device:CPU:0'):
hyperparam_config = spec_pb2.GridPoint()
hyperparam_config.use_moving_average = True
builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)
annotator = builder.add_annotation()
builder.add_saver()
tf.logging.info('Reading documents...')
input_corpus = sentence_io.ConllSentenceReader(FLAGS.input_file).corpus()
session_config = tf.ConfigProto(
log_device_placement=False,
intra_op_parallelism_threads=FLAGS.threads,
inter_op_parallelism_threads=FLAGS.threads)
with tf.Session(graph=g, config=session_config) as sess:
tf.logging.info('Initializing variables...')
sess.run(tf.global_variables_initializer())
tf.logging.info('Loading from checkpoint...')
sess.run('save/restore_all', {'save/Const:0': FLAGS.checkpoint_file})
tf.logging.info('Processing sentences...')
processed = []
start_time = time.time()
run_metadata = tf.RunMetadata()
for start in range(0, len(input_corpus), FLAGS.max_batch_size):
end = min(start + FLAGS.max_batch_size, len(input_corpus))
feed_dict = {annotator['input_batch']: input_corpus[start:end]}
for comp, beam_size in component_beam_sizes:
feed_dict['%s/InferenceBeamSize:0' % comp] = beam_size
for comp in components_to_locally_normalize:
feed_dict['%s/LocallyNormalize:0' % comp] = True
if FLAGS.timeline_output_file and end == len(input_corpus):
serialized_annotations = sess.run(
annotator['annotations'], feed_dict=feed_dict,
options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
run_metadata=run_metadata)
trace = timeline.Timeline(step_stats=run_metadata.step_stats)
with open(FLAGS.timeline_output_file, 'w') as trace_file:
trace_file.write(trace.generate_chrome_trace_format())
else:
serialized_annotations = sess.run(
annotator['annotations'], feed_dict=feed_dict)
processed.extend(serialized_annotations)
tf.logging.info('Processed %d documents in %.2f seconds.',
len(input_corpus), time.time() - start_time)
pos, uas, las = evaluation.calculate_parse_metrics(input_corpus, processed)
if FLAGS.log_file:
with gfile.GFile(FLAGS.log_file, 'w') as f:
f.write('%s\t%f\t%f\t%f\n' % (FLAGS.language_name, pos, uas, las))
if FLAGS.output_file:
with gfile.GFile(FLAGS.output_file, 'w') as f:
for serialized_sentence in processed:
sentence = sentence_pb2.Sentence()
sentence.ParseFromString(serialized_sentence)
f.write(text_format.MessageToString(sentence) + '\n\n')
if __name__ == '__main__':
tf.app.run()
|
[
"dhanyasj01@gmail.com"
] |
dhanyasj01@gmail.com
|
787e0215434095aa0b3afec689844c5bea7ff1fc
|
82c73b70c2002f647bdc254125f0bdb18f0b79d2
|
/openstack_dashboard/dashboards/admin/volumes/urls.py
|
4a6a4c23b4007ad736bdeacbe29791aa6810287d
|
[
"Apache-2.0"
] |
permissive
|
xuweiliang/Codelibrary
|
cfb5755ced54c65cacdb3e35ab2b98385f8d5f8e
|
54e45b2daa205132c05b0ff5a2c3db7fca2853a7
|
refs/heads/master
| 2021-05-04T00:31:42.025238
| 2018-03-20T07:05:20
| 2018-03-20T07:05:20
| 71,852,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,999
|
py
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import include # noqa
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.volumes.backups \
import urls as backups_urls
from openstack_dashboard.dashboards.admin.volumes.snapshots \
import urls as snapshot_urls
from openstack_dashboard.dashboards.admin.volumes import views
from openstack_dashboard.dashboards.admin.volumes.volumes \
import urls as volume_urls
from openstack_dashboard.dashboards.admin.volumes.volume_types \
import urls as volume_types_urls
urlpatterns = patterns('',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^\?tab=volumes_and_snapshots__snapshots_tab$',
views.IndexView.as_view(), name='snapshots_tab'),
url(r'^\?tab=volumes_and_snapshots__volumes_tab$',
views.IndexView.as_view(), name='volumes_tab'),
url(r'^\?tab=volumes_and_snapshots__backups_tab$',
views.IndexView.as_view(), name='backups_tab'),
url(r'', include(volume_urls, namespace='volumes')),
url(r'backups/', include(backups_urls, namespace='backups')),
url(r'snapshots/', include(snapshot_urls, namespace='snapshots')),
url(r'^\?tab=volumes_group_tabs__volume_types_tab$',
views.IndexView.as_view(),
name='volume_types_tab'),
url(r'volume_types/',
include(volume_types_urls, namespace='volume_types')),
)
|
[
"root@newton.com"
] |
root@newton.com
|
a88e8898471f969bb19e173853e19c315c95f494
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/perm_20200622013142.py
|
721353b76d5da3353229c7518531f035e0fa2219
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
def sequence(n,k):
newArr = []
for i in range(1,n+1):
newArr.append(i)
print(newArr)
sequence(3,3)
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
dfe31c4e723e17ac685d9e2a451abd83f0774db5
|
b0cdbad299f6174bfdb0fba173dbcf3889b82209
|
/Modules/sys/38_sys.py
|
09e80a1a040cbbf4490bac86fe593db7821a7af8
|
[] |
no_license
|
deesaw/PythonD-06
|
a33e676f1e0cfc13b4ea645c8b60547b198239ac
|
3c6f065d7be2e3e10cafb6cef79d6cae9d55a7fa
|
refs/heads/master
| 2023-03-18T08:24:42.030935
| 2021-03-02T14:15:09
| 2021-03-02T14:15:09
| 343,797,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
import sys
print("Length:",len(sys.argv))
if (len(sys.argv)>2):
sum=0
for i in sys.argv[1:]:
sum=sum+int(i)
print(sum)
else:
total=0
no_Values=int(input("How many values has to be added:"))
for val in range(no_Values):
print(val)
num=int(input("Enter Value:"))
total+=num
print(total)
|
[
"69420960+deesaw@users.noreply.github.com"
] |
69420960+deesaw@users.noreply.github.com
|
29958a6140724765938a648ad8144e723a3f67dc
|
fecc1daf3ee945191dee561dd501e9e17a36685d
|
/projectile.py
|
77b4eb35dcd119c7c25ba9c514b258c87ce31e60
|
[] |
no_license
|
tt-n-walters/tt19-pytue-game
|
856d9bb4a2c4260d88b1ef6fb63426f648c4808f
|
2fe4ca47180b617f0d1d72046753fa5e914a2809
|
refs/heads/master
| 2022-10-23T11:35:03.150595
| 2020-06-16T17:03:49
| 2020-06-16T17:03:49
| 267,097,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
from arcade import Sprite, draw_rectangle_filled, color
from math import sin, cos, radians
class Projectile(Sprite):
def __init__(self, image_filename, direction, speed):
super().__init__(filename=image_filename, scale=4)
self.angle = direction
self.change_x = speed * cos(radians(direction))
self.change_y = speed * sin(radians(direction))
class SmallBullet(Projectile):
def __init__(self, gun_x, gun_y, direction, speed):
super().__init__("assets/bullets/bulletDark2.png", direction, speed)
self.center_x = gun_x
self.center_y = gun_y
self.width = self.width / 2
self.height = self.height / 2
self.angle = -90
|
[
"nico.walters@techtalents.es"
] |
nico.walters@techtalents.es
|
44851354a019a77a82c2a8e957f0ee79172d10cd
|
2fff43f976e55c31e448e56b2809c36a0b154684
|
/blog/views.py
|
b2aac913895a07b61be54fae4394fae8a9ac7c18
|
[] |
permissive
|
omar115/first_blog_application
|
60f48c859f7b2d5be30f6d4abc34564b2dc7cd08
|
c87ae74bdeabc72fc12162528a966ef1295184e6
|
refs/heads/main
| 2023-02-16T12:20:24.513085
| 2021-01-14T11:57:00
| 2021-01-14T11:57:00
| 329,451,779
| 0
| 0
|
MIT
| 2021-01-14T10:20:55
| 2021-01-13T22:56:54
| null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
from django.shortcuts import render
# Create your views here.
def post_list(request):
return render(request, 'blog/post_list.html', {})
|
[
"omarhasan115@gmail.com"
] |
omarhasan115@gmail.com
|
2fad9418b56e80ca01ab03f50a5629b955b26ddb
|
e1efc8e0b0e4629dea61504fbc816c0527691bd9
|
/8.thread线程/1.线程基础/1_线程基本概念.py
|
63dc04d2e753d0495c412dff0504ed2dee2325fc
|
[] |
no_license
|
xiongmengmeng/xmind-technology
|
2bb67a0bf92cfd660cac01f8ab3a2454423ccba5
|
e2fdb6987ef805a65f0a4feb52d84383853f4b77
|
refs/heads/main
| 2023-07-31T07:10:29.868120
| 2021-09-11T08:18:17
| 2021-09-11T08:18:17
| 307,636,242
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,390
|
py
|
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="thread"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("Basic thread concepts")
r2=s2.getRootTopic()
r2.setTitle("Basic thread concepts")
content={
    'Process':[
        'The basic unit of execution managed by the operating system',
        'When a program is run and its code is loaded from disk into memory, a process is started',
        'e.g. a running .exe program'
    ],
    'Thread':[
        'A stream of instructions handed to the CPU one by one in a fixed order',
        'An independently running subtask inside a process',
        'Makes maximum use of idle CPU time to process tasks'
    ],
    'Parallelism and concurrency':[
        'On a single-core CPU, threads execute serially',
        {'Task scheduler':[
            'Hands out CPU time slices (about 15 ms) to different programs',
            'Because the CPU switches between threads (time slices) so quickly, they appear to run at the same time'
        ]},
        {'Concurrency':[
            'Threads take turns using the CPU; execution is actually serial'
        ]},
        {'Parallelism':[
            'On a multi-core CPU, each core can schedule and run a thread'
        ]}
    ],
    'Multithreading':[
        {'Asynchrony':[
            'The outcome does not depend on the order in which code is executed or called'
        ]},
        {'Ways to implement':[
            'Extend the Thread class',
            'Implement the Runnable interface',
            'Implement the Callable interface (FutureTask receives the return value)'
        ]},
        {'Future interface':[
            'Retrieves the result of an asynchronous computation',
        ]},
        {'FutureTask class':[
            'An implementation class of the Future interface',
        ]}
    ],
    'Non-thread-safety':[
        'Multiple threads operate on the same instance variable of the same object -> values get overwritten or fall out of sync -> the program flow is affected',
        {'Categories':[
            'Member variables: shared, with read and write operations',
            'Local variables: a referenced object escapes the method scope'
        ]},
        'Thread safety covers both atomicity and visibility'
    ],
    # 'Timer class':[
    #     {'1. Timer class':[
    #         'Schedules tasks; the TimeTask class encapsulates a scheduled task'
    #     ]},
    #     {'2. Schedule(TimeTask timeTask, Date time) runs a task once at the specified time':[
    #         'One Timer can run multiple TimeTasks',
    #         'TimeTasks are executed one after another in queue order',
    #         'The actual run time may differ from the schedule (single-threaded execution)'
    #     ]},
    #     {'3. Schedule(TimeTask timeTask, Date firstTime, long period)':[
    #         'After the specified date, runs the task periodically at the given interval, indefinitely'
    #     ]}
    # ],
    'Singletons under multithreading':[
        {'Eager loading':[
            'The object is already created by the time the class is used, so there is no thread-safety issue',
            'The preparation phase of class loading allocates space for class variables and sets default values; the initialisation phase assigns them'
        ]},
        {'Lazy loading':[
            'Balances efficiency and thread safety using the DCL double-checked locking idiom: volatile + synchronized',
            'private volatile static MyObject myObject;',
            '....',
            'synchronized (MyObject.class) {',
            '    if (object == null) {',
            '        object = new MyObject();',
            '    }',
            '}',
            {'Static nested class implementation':[
                'The initialisation phase of class loading executes the static blocks of the class'
            ]}
        ]}
    ]
}
# build the xmind
xmind.build(content,r2)
# save the xmind
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
|
[
"xiongmengmeng@qipeipu.com"
] |
xiongmengmeng@qipeipu.com
|
39dd022361eeff4b26dc76375bafd21c5b91e869
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/Simulation/ISF/ISF_Geant4/ISF_Geant4CommonTools/python/ISF_Geant4CommonToolsConfigDb.py
|
57bc594f8edc61ff1211ce0259aa17ceaa7dbae2
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
"""
Configuration database for ISF_Geant4CommonTools
Elmar Ritsch, 31/10/2014
"""
from AthenaCommon.CfgGetter import addTool, addToolClone, addService, addAlgorithm, \
addTypesToExcludeIfDefaultValue, addNamesToExcludeIfDefaultValue, addFullNamesToExcludeIfDefaultValue, \
addPropertiesToExcludeIfDefault, \
addTypesToSkipIfNotAvailable, addNamesToSkipIfNotAvailable, addFullNamesToSkipIfNotAvailable, \
addTypesOnlyToSkip
from AthenaCommon.Constants import * # FATAL,ERROR etc.
import AthenaCommon.SystemOfUnits as Units
# Common tools, services and algorithms used by jobs
addTool("ISF_Geant4CommonTools.ISF_Geant4CommonToolsConfig.getEntryLayerTool", "ISF_EntryLayerTool")
addTool("ISF_Geant4CommonTools.ISF_Geant4CommonToolsConfig.getAFIIEntryLayerTool", "ISF_AFIIEntryLayerTool")
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
4c5c9b5b065e80fca6d1741d5b52a87f50e94787
|
b8ba0f496b3e89af32c11503b8bb87b1917c4c36
|
/mutant/__init__.py
|
862b1670352ecd904a7b646a269bdae676ea1330
|
[
"MIT"
] |
permissive
|
torchingloom/django-mutant
|
21a3bbb076668c88e855725e74163442810e4817
|
7bf396071f22c7339098b7ec57e0629750cf57c8
|
refs/heads/master
| 2021-01-21T20:19:04.029964
| 2016-01-14T05:02:13
| 2016-01-14T05:02:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
from __future__ import unicode_literals
import logging
from django.utils.version import get_version
VERSION = (0, 2, 2, 'alpha', 0)
__version__ = get_version(VERSION)
logger = logging.getLogger('mutant')
default_app_config = 'mutant.apps.MutantConfig'
|
[
"charette.s@gmail.com"
] |
charette.s@gmail.com
|
1d5a1ceca4a37df1c9413d3bd9f77be6dcc74c75
|
bb93784aad5933329118cc2ed86357045e535c51
|
/setup.py
|
d92ebe3852c13ffa729b201b45376444bae1511e
|
[] |
no_license
|
garaemon/pr_style_review
|
3f69ddee8a93d3422955fa96f42c754a4c3c1a43
|
2ae6e400ae68746fc6d385f642d01cbaaa9c19c2
|
refs/heads/master
| 2020-04-24T22:11:07.173809
| 2019-02-28T16:01:57
| 2019-02-28T16:01:57
| 172,303,383
| 0
| 0
| null | 2019-02-28T16:01:58
| 2019-02-24T06:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 135
|
py
|
from setuptools import setup
setup(
name='pr_style_review',
version='0.0.0',
install_requires=['GitPython', 'github3.py'])
|
[
"garaemon@gmail.com"
] |
garaemon@gmail.com
|
e53912616396c13d4e09af02972f7af0a5d56051
|
babff7df289cb7173a22be1f68feec51f71d9269
|
/manage.py
|
0436a4c994071feac690f1a81989b12038390b25
|
[
"MIT"
] |
permissive
|
jwestgard/prange-db
|
9d1a6817dd9f94d8a4dc380cefe8846dd8b20312
|
27535271cd902d18673c187f4277e47327563556
|
refs/heads/master
| 2021-01-10T21:20:50.583769
| 2015-10-11T17:21:04
| 2015-10-11T17:21:04
| 42,461,824
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
#!/usr/bin/env python
import os
from app import create_app, db
from app.models import User, Role
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
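# Typical invocations (illustrative notes, not part of the original script;
# the commands are the ones registered on the Manager above):
#   python manage.py shell   -> shell preloaded with app, db, User and Role
#   python manage.py db ...  -> Flask-Migrate commands (init/migrate/upgrade)
#   python manage.py test    -> discover and run the unit tests in tests/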
|
[
"jwestgard@mac.com"
] |
jwestgard@mac.com
|
79c9b3bb46711849e3535454e5043208d663f50b
|
c1f732ebeceb8c4103454f8ed8c5be3f02589b3f
|
/run.py
|
571c2e31c096ff2f91563919165a8d25630100b9
|
[
"MIT"
] |
permissive
|
Roychela/Password-Locker
|
828cab4ba678f11beeee602cf2a475e52e45e147
|
67e16580ea9283ede593c5cf6eadcfde877a70d2
|
refs/heads/master
| 2020-06-04T06:01:37.011996
| 2019-06-17T12:23:38
| 2019-06-17T12:23:38
| 191,897,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,172
|
py
|
#!/usr/bin/env python3.6
from user_credentials import User
from user_credentials import Credentials
def create_user(uname,password):
'''
Function to create a new user
'''
new_user = User(uname, password)
return new_user
def save_user(user):
'''
Function to save a new user
'''
user.save_user()
def authenticate_user(username, password):
'''
Function to authenticate a user
'''
authenticated_user = Credentials.confirm_login(username, password)
return authenticated_user
def create_credential(user_name,site_name,account_name,password):
'''
Function to create a new credential object
'''
new_credential = Credentials(user_name,site_name,account_name,password)
return new_credential
def save_credential(credential):
'''
Function to save a created credential
'''
Credentials.save_credentials(credential)
def generate_password():
'''
Function to randomly generate password
'''
passwrd_generated = Credentials.generate_password()
return passwrd_generated
def display_credentials(user_name):
'''
Function to display credentials
'''
return Credentials.display_credentials(user_name)
def copy_credential(site_name):
'''
Function to copy a credential to the clipboard
'''
return Credentials.copy_credential(site_name)
def main():
print(' ')
print('Hello! Welcome to Password Locker.')
while True:
print(' ')
print("-"*40)
print('Use these short codes: ca-Create Password-Locker account, log-Login, ex-Exit')
short_code = input('Enter short code here: ').lower().strip()
if short_code == 'ex':
break
elif short_code == 'ca':
print("-"*40)
print(' ')
print('To create a new account:')
username = input('Choose a username - ').strip()
password = input('Choose a password - ').strip()
save_user(create_user(username,password))
print(" ")
            print(f'Your Password-Locker account username is: {username} and your password is: {password}')
elif short_code == 'log':
print("-"*40)
print(' ')
print('To login:')
user_name = input('Enter your Password-Locker username - ').strip()
password = str(input('Enter your password - '))
user_authenticated = authenticate_user(user_name,password)
if user_authenticated == user_name:
print(" ")
print(f'Welcome {user_name}. You have successfully logged in. Choose short code to continue')
print(' ')
while True:
print("-"*40)
print('Your credentials short codes: ccd-Create credential, dc-Display Credentials, dl-delete credentials account, cp-Copy Password, ex-Exit')
short_code = input('Enter short code: ').lower().strip()
print("-"*40)
if short_code == 'ex':
print(" ")
print(f'Goodbye {user_name}')
break
elif short_code == 'ccd':
print(' ')
print('Enter your credential account information:')
site_name = input('Enter the site name- ').strip()
account_name = input('Enter your account name - ').strip()
while True:
print(' ')
print("-"*40)
print('Select option for entering a password: ep-Enter your own password, gp-Generate a password ,ex-Exit')
passwrd_select = input('Enter an option: ').lower().strip()
print("-"*40)
if passwrd_select == 'ep':
print(" ")
password = input('Enter your password: ').strip()
break
elif passwrd_select == 'gp':
password = generate_password()
break
elif passwrd_select == 'ex':
break
else:
print('Incorrect entry. Try again.')
save_credential(create_credential(user_name,site_name,account_name,password))
print(' ')
print(f'Credential Created: Site Name: {site_name} - Account Name: {account_name} - Password: {password}')
print(' ')
elif short_code == 'dc':
print(' ')
if display_credentials(user_name):
print('Your credentials account list:')
print(' ')
for credential in display_credentials(user_name):
print(f'Site Name: {credential.site_name} - Account Name: {credential.account_name} - Password: {credential.password}')
print(' ')
else:
print(' ')
print("No credentials saved")
print(' ')
elif short_code == 'cp':
print(' ')
chosen_site = input('Enter the site name for the credential password to copy: ')
copy_credential(chosen_site)
print('')
                        print('Paste the copied password for that site here to verify it:')
copy = input()
else:
                        print('Incorrect entry. Try again.')
else:
print(' ')
print('Incorrect entry. Try again or Create an Account.')
else:
print("-"*40)
print(' ')
print('Incorrect entry. Try again.')
if __name__ == "__main__":
main()
|
[
"roychela@gmail.com"
] |
roychela@gmail.com
|
5934915c4f56931289cac74101259879de684988
|
544cfadc742536618168fc80a5bd81a35a5f2c99
|
/tools/acloud/setup/setup.py
|
c424318b97c03083e60d2b51c27384c8c788fcfc
|
[
"Apache-2.0"
] |
permissive
|
ZYHGOD-1/Aosp11
|
0400619993b559bf4380db2da0addfa9cccd698d
|
78a61ca023cbf1a0cecfef8b97df2b274ac3a988
|
refs/heads/main
| 2023-04-21T20:13:54.629813
| 2021-05-22T05:28:21
| 2021-05-22T05:28:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,732
|
py
|
# Copyright 2018 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Setup entry point.
Setup will handle all of the necessary steps to enable acloud to create a local
or remote instance of an Android Virtual Device.
"""
from __future__ import print_function
import os
import subprocess
import sys
from acloud.internal import constants
from acloud.internal.lib import utils
from acloud.public import config
from acloud.setup import host_setup_runner
from acloud.setup import gcp_setup_runner
def Run(args):
"""Run setup.
Setup options:
-host: Setup host settings.
-gcp_init: Setup gcp settings.
-None, default behavior will setup host and gcp settings.
Args:
args: Namespace object from argparse.parse_args.
"""
if args.update_config:
_UpdateConfig(args.config_file, args.update_config[0], args.update_config[1])
return
_RunPreSetup()
# Setup process will be in the following manner:
# 1.Print welcome message.
_PrintWelcomeMessage()
# 2.Init all subtasks in queue and traverse them.
host_base_runner = host_setup_runner.HostBasePkgInstaller()
host_avd_runner = host_setup_runner.AvdPkgInstaller()
host_cf_common_runner = host_setup_runner.CuttlefishCommonPkgInstaller()
host_env_runner = host_setup_runner.CuttlefishHostSetup()
gcp_runner = gcp_setup_runner.GcpTaskRunner(args.config_file)
task_queue = []
# User must explicitly specify --host to install the avd host packages.
if args.host:
task_queue.append(host_base_runner)
task_queue.append(host_avd_runner)
task_queue.append(host_cf_common_runner)
task_queue.append(host_env_runner)
# We should do these setup tasks if specified or if no args were used.
if args.host_base or (not args.host and not args.gcp_init):
task_queue.append(host_base_runner)
if args.gcp_init or (not args.host and not args.host_base):
task_queue.append(gcp_runner)
for subtask in task_queue:
subtask.Run(force_setup=args.force)
# 3.Print the usage hints.
_PrintUsage()
def _PrintWelcomeMessage():
"""Print welcome message when acloud setup been called."""
# pylint: disable=anomalous-backslash-in-string
asc_art = " \n" \
" ___ _______ ____ __ _____ \n" \
" / _ |/ ___/ / / __ \/ / / / _ \\ \n" \
" / __ / /__/ /__/ /_/ / /_/ / // / \n" \
"/_/ |_\\___/____/\\____/\\____/____/ \n" \
" \n"
print("\nWelcome to")
print(asc_art)
def _PrintUsage():
"""Print cmd usage hints when acloud setup been finished."""
utils.PrintColorString("")
utils.PrintColorString("Setup process finished")
def _RunPreSetup():
"""This will run any pre-setup scripts.
If we can find any pre-setup scripts, run it and don't care about the
results. Pre-setup scripts will do any special setup before actual
setup occurs (e.g. copying configs).
"""
if constants.ENV_ANDROID_BUILD_TOP not in os.environ:
print("Can't find $%s." % constants.ENV_ANDROID_BUILD_TOP)
print("Please run '#source build/envsetup.sh && lunch <target>' first.")
sys.exit(constants.EXIT_BY_USER)
pre_setup_sh = os.path.join(os.environ.get(constants.ENV_ANDROID_BUILD_TOP),
"tools",
"acloud",
"setup",
"pre_setup_sh",
"acloud_pre_setup.sh")
if os.path.exists(pre_setup_sh):
subprocess.call([pre_setup_sh])
def _UpdateConfig(config_file, field, value):
"""Update the user config.
Args:
config_file: String of config file path.
field: String, field name in user config.
value: String, the value of field.
"""
config_mgr = config.AcloudConfigManager(config_file)
config_mgr.Load()
user_config = config_mgr.user_config_path
print("Your config (%s) is updated." % user_config)
gcp_setup_runner.UpdateConfigFile(user_config, field, value)
_PrintUsage()
|
[
"rick_tan@qq.com"
] |
rick_tan@qq.com
|
02d209733f4ae22362411e874febaf105049cc1f
|
6ad2fb13c42b6bb483189b0931bcca8bb117b5dc
|
/tests/ci/unittests/sdk/internal/agent/agent_client_test.py
|
e547c5dfca11df49d80a961c5e0ba687426d719c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
woodywan/python-sdk
|
aac2a2527f07c8900a01b4336f890c603a1c8d4c
|
b8583a8abf3bdc1f978fad6f692e980de00bc7ea
|
refs/heads/master
| 2023-01-14T03:23:12.291230
| 2020-11-20T15:18:41
| 2020-11-20T15:18:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,223
|
py
|
# Copyright 2020 TestProject (https://testproject.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import responses
from src.testproject.rest.messages.agentstatusresponse import AgentStatusResponse
from src.testproject.sdk.exceptions import SdkException, AgentConnectException
from src.testproject.sdk.internal.agent import AgentClient
from src.testproject.helpers import ConfigHelper
@pytest.fixture()
def mocked_agent_address(mocker):
# Mock the Agent address
mocker.patch.object(ConfigHelper, "get_agent_service_address")
ConfigHelper.get_agent_service_address.return_value = "http://localhost:9876"
@responses.activate
def test_get_agent_status_no_response_raises_sdkexception(mocked_agent_address):
# Mock the response returned by the Agent when retrieving the address
responses.add(responses.GET, "http://localhost:9876/api/status", status=200)
with pytest.raises(SdkException) as sdke:
AgentClient.get_agent_version(token="1234")
assert (
str(sdke.value)
== "Could not parse Agent status response: no JSON response body present"
)
@responses.activate
def test_get_agent_status_response_without_tag_element_raises_sdkexception(
mocked_agent_address,
):
# Mock the response returned by the Agent when retrieving the address
responses.add(
responses.GET,
"http://localhost:9876/api/status",
json={"key": "value"},
status=200,
)
with pytest.raises(SdkException) as sdke:
AgentClient.get_agent_version(token="1234")
assert (
str(sdke.value)
== "Could not parse Agent status response: element 'tag' not found in JSON response body"
)
@responses.activate
def test_get_agent_status_response_with_error_http_status_code_raises_agentconnectexception(
mocked_agent_address,
):
# Mock the response returned by the Agent when retrieving the address
responses.add(responses.GET, "http://localhost:9876/api/status", status=500)
with pytest.raises(AgentConnectException) as ace:
AgentClient.get_agent_version(token="1234")
assert (
str(ace.value) == "Agent returned HTTP 500 when trying to retrieve Agent status"
)
@responses.activate
def test_get_agent_status_response_with_tag_element_creates_agentstatusresponse(
mocked_agent_address,
):
# Mock the response returned by the Agent when retrieving the address
responses.add(
responses.GET,
"http://localhost:9876/api/status",
json={"tag": "1.2.3"},
status=200,
)
agent_status_response: AgentStatusResponse = AgentClient.get_agent_version(
token="1234"
)
assert agent_status_response.tag == "1.2.3"
|
[
"bas@ontestautomation.com"
] |
bas@ontestautomation.com
|
9aebea7c51c967d87dbf4f648217c77a7eb52dda
|
c268dcf432f3b7171be6eb307aafbe1bd173285a
|
/reddit2telegram/channels/~inactive/r_linuxmemes/app.py
|
7c7c338a5ee9f373aadc9306f10466ee14873922
|
[
"MIT"
] |
permissive
|
Fillll/reddit2telegram
|
a7162da2cc08c81bcc8078ea4160d4ee07461fee
|
5d8ee3097e716734d55a72f5a16ce3d7467e2ed7
|
refs/heads/master
| 2023-08-09T10:34:16.163262
| 2023-07-30T18:36:19
| 2023-07-30T18:36:19
| 67,726,018
| 258
| 205
|
MIT
| 2023-09-07T02:36:36
| 2016-09-08T17:39:46
|
Python
|
UTF-8
|
Python
| false
| false
| 143
|
py
|
#encoding:utf-8
subreddit = 'linuxmemes'
t_channel = '@r_linuxmemes'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
[
"git@fillll.ru"
] |
git@fillll.ru
|
9faffe3153372843ef972e1783f126a5c1a982cf
|
10d98fecb882d4c84595364f715f4e8b8309a66f
|
/structformer/utils.py
|
5bfcdbe7fcd76e6958692bae860d3f0ecf877937
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
afcarl/google-research
|
51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42
|
320a49f768cea27200044c0d12f394aa6c795feb
|
refs/heads/master
| 2021-12-02T18:36:03.760434
| 2021-09-30T20:59:01
| 2021-09-30T21:07:02
| 156,725,548
| 1
| 0
|
Apache-2.0
| 2018-11-08T15:13:53
| 2018-11-08T15:13:52
| null |
UTF-8
|
Python
| false
| false
| 1,703
|
py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utils for training."""
import random
import numpy
import torch
def batchify(idxs, bsz, device, pad=0, shuffle=True):
"""Batchify the training data."""
length = [len(seq) for seq in idxs]
sorted_idx = numpy.argsort(length)
idxs_sorted = [idxs[i] for i in sorted_idx]
idxs_batched = []
i = 0
def get_batch(source, i, batch_size, pad=0):
total_length = 0
data = []
while total_length < batch_size and i < len(source):
data.append(source[i])
total_length += len(source[i])
i += 1
length = [len(seq) for seq in data]
max_l = max(length)
data_padded = []
for seq in data:
data_padded.append(seq + [pad] * (max_l - len(seq)))
data_mat = torch.LongTensor(data_padded).to(device)
return data_mat
while i < len(idxs_sorted):
idxs_batched.append(get_batch(idxs_sorted, i, bsz, pad))
i += idxs_batched[-1].size(0)
if shuffle:
sentence_idx = list(range(len(idxs_batched)))
random.shuffle(sentence_idx)
idxs_batched = [idxs_batched[i] for i in sentence_idx]
return idxs_batched
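
# Illustrative usage (an assumption, not from the original repo): batch three
# token-id sequences with a budget of roughly six tokens per batch on CPU.
if __name__ == '__main__':
    toy_idxs = [[5, 6, 7], [1, 2], [3, 4, 8, 9]]
    batches = batchify(toy_idxs, bsz=6, device=torch.device('cpu'),
                       pad=0, shuffle=False)
    for batch in batches:
        print(batch.shape)  # padded LongTensor, one row per sequence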
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
335a739b77ad9f9f6b858847ac0bd3526d3b033c
|
2ed2dd917afb05d194e87f989d78953b31a5781b
|
/lesson8/mission1.py
|
c0a03c9b4c67ea4473ebbb1e9fc2e4bbd98d5c46
|
[] |
no_license
|
RenegaDe1288/pythonProject
|
4058d549db7c37652f77438c31f8b31476497d98
|
801c06f3be22ed63214987b11d6f1b3fd2fe5b44
|
refs/heads/master
| 2023-08-17T13:20:50.777842
| 2021-10-05T10:51:00
| 2021-10-05T10:51:00
| 393,145,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
import random
cereal = random.randint(20, 100)
print('Total kg of buckwheat = ', cereal)
for num in range(1, 10):
    if cereal >= 4:
        cereal -= 4
        print('Month = ', num)
        print('Buckwheat left at the end of the month:', cereal)
    else:
        print('You starved in month ', num)
break
|
[
"D121188@yandex.ru"
] |
D121188@yandex.ru
|
801079f7f2054c5a86f12bc6b180ae002b113965
|
de9b8b7192a0a81e9249823bb2b86f0b7e452863
|
/.history/main_20171106225556.py
|
2fd686058e6c52f591c4b1270ab859ab28dbd2df
|
[
"MIT"
] |
permissive
|
reecebenson/uwe-dadsa-tennis-a
|
f5eaeb1b96d4e61f29279514e68eeea8ad6533db
|
d0763f819b300fcd0ce27041f5bc4ef0519c00bf
|
refs/heads/master
| 2023-07-08T16:13:23.963348
| 2017-11-30T12:07:01
| 2017-11-30T12:07:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,361
|
py
|
# DADSA - Assignment 1
# Reece Benson
import random
from classes import Menu as Menu
from classes import Handler as Handler
class App():
# Define the variables we will be using
debug = True
handler = None
# Define all of the properties we will need to use
def __init__(self):
# Load our handler
self.handler = Handler.Handler(self)
self.handler.load()
# Generate rounds
self.generate_rounds()
# Hold the program
self.exit()
# Generate our rounds from our player list
def generate_rounds(self):
# Let's generate our random rounds from scratch
round_data = { }
# Write our new data to memory
for seasonId in self.handler.get_seasons():
season = self.handler.get_season(seasonId)
players = season.players()
# Generate our rounds
for gender in players:
# Create our gendered rounds
if(not gender in round_data):
# Default Round Cap
roundCap = 3
# Do we have a Round Cap overrider for this gender?
if(gender + "_cap" in season.settings()):
roundCap = season.settings()[gender + "_cap"]
# Update our round data
round_data.update({ gender: [ { "_roundCap": roundCap } ] })
# Create our round data from players
rnd_players = random.sample(players[gender], len(players[gender]))
x = 0
for i in range(len(rnd_players)):
                    # Have we exceeded our index? Stop before x + 1 runs past the end
                    if(x + 1 >= len(rnd_players)):
                        break
# Grab our versus players
playerOne = rnd_players[x]
playerTwo = rnd_players[x + 1]
print("{0} vs {1} ".format(playerOne.name(), playerTwo.name()))
# Increment by 2 to avoid having duplicates
x += 2
print(round_data)
# A method which exits the program after the user has pressed the Return key
def exit(self):
input(">>> Press <Return> to terminate the program")
exit()
App()
|
[
"me@reecebenson.me"
] |
me@reecebenson.me
|
c7c12ce39667b16703c21aca62a7e62b8faaaf14
|
df7f13ec34591fe1ce2d9aeebd5fd183e012711a
|
/hata/ext/plugin_loader/__init__.py
|
69f22e8d027110d53bfe40fdda2a90f6223048a0
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
HuyaneMatsu/hata
|
63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e
|
53f24fdb38459dc5a4fd04f11bdbfee8295b76a4
|
refs/heads/master
| 2023-08-20T15:58:09.343044
| 2023-08-20T13:09:03
| 2023-08-20T13:09:03
| 163,677,173
| 3
| 3
|
Apache-2.0
| 2019-12-18T03:46:12
| 2018-12-31T14:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 781
|
py
|
from .import_overwrite import *
from .plugin_tree import *
from .snapshot import *
from .utils import *
from .client_extension import *
from .constants import *
from .exceptions import *
from .plugin import *
from .plugin_extractor import *
from .plugin_loader import *
from .plugin_root import *
from .helpers import *
__all__ = (
*import_overwrite.__all__,
*plugin_tree.__all__,
*snapshot.__all__,
*utils.__all__,
*client_extension.__all__,
*constants.__all__,
*exceptions.__all__,
*plugin.__all__,
*plugin_extractor.__all__,
*plugin_loader.__all__,
*plugin_root.__all__,
*helpers.__all__,
)
from .. import register_library_extension
register_library_extension('HuyaneMatsu.plugin_loader')
del register_library_extension
|
[
"re.ism.tm@gmail.com"
] |
re.ism.tm@gmail.com
|
96526cb71cfcf833e6e090c692e8579be40537a1
|
9264cda8d9bb152e4fed4923e6403a2334abbe89
|
/laxy_backend/tasks/orchestration.py
|
7d20f917adbe162b19ff3f10a961e7eaf4300c7a
|
[
"Apache-2.0"
] |
permissive
|
MonashBioinformaticsPlatform/laxy
|
b228d93690f7cb9c0658af44013497f6c756167c
|
bee9d283d0932dd845cbc9c7c090dde794d2ecbc
|
refs/heads/master
| 2023-08-26T06:25:11.188255
| 2023-08-22T05:13:09
| 2023-08-22T05:13:09
| 104,432,675
| 3
| 2
|
Apache-2.0
| 2023-06-27T23:34:18
| 2017-09-22T04:48:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,434
|
py
|
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from celery.utils.log import get_task_logger
from celery import shared_task
from celery import Celery, states, chain, group
from celery.exceptions import (Ignore,
InvalidTaskError,
TimeLimitExceeded,
SoftTimeLimitExceeded)
logger = get_task_logger(__name__)
@shared_task(bind=True, track_started=True)
def dispose_compute_resource(self, task_data, **kwargs):
from ..models import Job, ComputeResource
if task_data is None:
raise InvalidTaskError("task_data is None")
compute_resource_id = task_data.get('compute_resource_id', None)
# result = task_data.get('result')
if not compute_resource_id:
job_id = task_data.get('job_id')
job = Job.objects.get(id=job_id)
compute_resource_id = job.compute_resource.id
    compute = ComputeResource.objects.get(id=compute_resource_id)
    # The status lives on the ComputeResource record, not on the bound task
    compute.status = ComputeResource.STATUS_TERMINATING
    compute.save()

    # TODO: Terminate the compute resource
    # (different depending on cloud provider, resource type)
    raise NotImplementedError()

    ################################################################

    compute.status = ComputeResource.STATUS_DECOMMISSIONED
    compute.save()
    return task_data
|
[
"ajperry@pansapiens.com"
] |
ajperry@pansapiens.com
|
97e03d5ce5a1b878b9cc44e984b6afd2fed84a1b
|
e96cc817c768915eeff46027ded14e759e8042ff
|
/Python编程/系统编程/线程/thread_lock_stu.py
|
021d30991b4e9dbbb4127f5a6df3225f838428b2
|
[] |
no_license
|
fovegage/learn-python
|
e22a32207cf513ba0f8c3428e9c00138987c2359
|
93b8d3513769a0b7d492a7b515f289fe3f1efc4a
|
refs/heads/master
| 2023-06-08T13:44:57.274677
| 2023-05-29T05:52:35
| 2023-05-29T05:52:35
| 148,493,932
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
# -*- coding: utf-8 -*-
# @Author: fovegage
# @File: thread_lock_stu.py
# @Email: fovegage@gmail.com
# @Date: 2018-09-21 16:41:02
# @Last Modified time: 2018-09-21 16:41:07
import threading
# A lock can be taken, and a delay can also be added
num = 0
class My_Thread_1(threading.Thread):
def run(self):
global num
for i in range(1000000):
flag = mutex.acquire(True)
if flag:
num += 1
mutex.release()
print("线程1:{}".format(num))
class My_thread_2(threading.Thread):
def run(self):
global num
for i in range(1000000):
flag = mutex.acquire(True)
if flag:
num += 1
mutex.release()
print("线程2:{}".format(num))
if __name__ == '__main__':
mutex = threading.Lock()
# mutex.acquire()
t1 = My_Thread_1()
t1.start()
t2 = My_thread_2()
t2.start()
|
[
"fovegage@gmail.com"
] |
fovegage@gmail.com
|
af36a65541b839c6bbb15fa9e1fd4ff8e5374673
|
c09a4b4f02849c03ba536edda2bf920b655be6bc
|
/wyl/add_noise.py
|
6dc8a805bf7045866c98b4283c6c4910ad3bc427
|
[] |
no_license
|
jpober/brownscripts
|
33bcc70a31694dfb06f1314adb1402316540108c
|
c25789ec765b018eaad59d99a0a4264c75655265
|
refs/heads/master
| 2021-01-23T22:01:19.004636
| 2020-11-12T18:39:14
| 2020-11-12T18:39:14
| 57,912,669
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,388
|
py
|
import numpy as np, pyuvdata.uvdata as uvd, sys, optparse, aipy
import capo
from aipy.const import k
import glob, matplotlib.pyplot as plt
from scipy.io.idl import readsav
from IPython import embed
o = optparse.OptionParser()
o.set_usage('add_noise.py [options] obs')
o.add_option('-o', dest='outpath', help='Destination directory',default='/users/wl42/data/wl42/CALSIM/')
o.add_option('-f', dest='fhdpath', help='FHD directory', default='/users/wl42/data/wl42/FHD_out/fhd_PhaseII_TESTSET_EoR0/')
o.add_option('--gain', dest='gain', default=False, action='store_true', help='')
opts,args = o.parse_args(sys.argv[1:])
obs = args[0]
Trec = 30.
Tsky = 180.
fhdpath = opts.fhdpath
fn = glob.glob(fhdpath+'vis_data/'+obs+'*') + glob.glob(fhdpath+'metadata/'+obs+'*')
uv = uvd.UVData()
uv.read_fhd(fn,use_model=True)
dt = uv.integration_time
df = uv.channel_width
fqs = uv.freq_array[0]/1e6
Tsys = float(Tsky)*np.power(fqs/(180.),-2.6) + float(Trec)*np.ones(fqs.shape)
Area = (198000.-215000.)/(200.*200.-150.*150.)*(fqs*fqs-150.*150.)+215000.
sigs = k*Tsys/(Area*np.sqrt(df*dt))*1e23/np.sqrt(2)
#print sigs
print ' adding noise:'
for ff in range(uv.Nfreqs):
noise = (np.random.normal(0,sigs[ff],(uv.Nblts,uv.Nspws,uv.Npols))+1j*np.random.normal(0,sigs[ff],(uv.Nblts,uv.Nspws,uv.Npols)))*np.logical_not(uv.flag_array[:,:,ff])
uv.data_array[:,:,ff] += noise
if opts.gain:
print ' apply gains:'
cal = readsav(fhdpath+'calibration/'+obs+'_cal.sav',python_dict=True)
a1 = uv.ant_1_array[:uv.Nbls]
a2 = uv.ant_2_array[:uv.Nbls]
g = {'x':{1:[],2:[]},'y':{1:[],2:[]}}
for i in range(uv.Nbls):
g['x'][1].append(cal['cal']['GAIN'][0][0][a1[i]])
g['x'][2].append(cal['cal']['GAIN'][0][0][a2[i]])
g['y'][1].append(cal['cal']['GAIN'][0][1][a1[i]])
g['y'][2].append(cal['cal']['GAIN'][0][1][a2[i]])
g['x'][1] = g['x'][1]*uv.Ntimes
g['x'][2] = g['x'][2]*uv.Ntimes
g['y'][1] = g['y'][1]*uv.Ntimes
g['y'][2] = g['y'][2]*uv.Ntimes
g['x'][1] = np.array(g['x'][1])
g['x'][2] = np.array(g['x'][2])
g['y'][1] = np.array(g['y'][1])
g['y'][2] = np.array(g['y'][2])
for i in range(uv.Npols):
p1,p2 = aipy.miriad.pol2str[uv.polarization_array[i]]
uv.data_array[:,0][:,:,i] *= (g[p1][1]*g[p2][2].conj())
print ' writing...'
uv.write_uvfits(opts.outpath+obs+'.uvfits',spoof_nonessential=True)
|
[
"wenyang_li@brown.edu"
] |
wenyang_li@brown.edu
|
cc828390d5d25140f299b141f0bee2892c95787d
|
0028a9a0d3fb346c44a386d507579fa6288ec0b9
|
/payment_receipt_invoice/__manifest__.py
|
95b995d758d7ba810d0d58fbc761aebded2fac62
|
[] |
no_license
|
rpsjr/extra-addons
|
283a7e54c3dc67ba2cab2b28e03e2cd8e3bfbe2d
|
9f8906b7908ad373cc26405c6aea54b0cd5031cb
|
refs/heads/master
| 2022-07-31T21:19:04.013649
| 2020-05-16T19:12:19
| 2020-05-16T19:12:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,543
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Cybrosys Technologies Pvt. Ltd.
# Copyright (C) 2017-TODAY Cybrosys Technologies(<http://www.cybrosys.com>).
# Author: Niyas Raphy,Fasluca(<http://www.cybrosys.com>)
# you can modify it under the terms of the GNU LESSER
# GENERAL PUBLIC LICENSE (LGPL v3), Version 3.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU LESSER GENERAL PUBLIC LICENSE (LGPL v3) for more details.
#
# You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE
# GENERAL PUBLIC LICENSE (LGPL v3) along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account Payment Receipt',
'summary': """Payment Receipt With Paid Details""",
'version': '10.0.1.0',
'description': """Payment Receipt With Paid Details""",
'author': 'Cybrosys Techno Solutions',
'company': 'Cybrosys Techno Solutions',
'website': 'http://www.cybrosys.com',
'category': 'Accounting',
'depends': ['base', 'account'],
'license': 'AGPL-3',
'data': [
'views/report_payment.xml',
'views/report.xml',
],
'demo': [],
'images': ['static/description/banner.jpg'],
'installable': True,
'auto_install': False,
}
|
[
"user@localhost.localdomain"
] |
user@localhost.localdomain
|
645ed88cfcfdedfaa7b157819933b2a425965edf
|
e35fd52fe4367320024a26f2ee357755b5d5f4bd
|
/leetcode/problems/599.minimum-index-sum-of-two-lists.py
|
9f64bc499ff33c39eaa7f998f6fc339d5a9d0027
|
[] |
no_license
|
liseyko/CtCI
|
a451967b0a0ce108c491d30b81e88d20ad84d2cd
|
c27f19fac14b4acef8c631ad5569e1a5c29e9e1f
|
refs/heads/master
| 2020-03-21T14:28:47.621481
| 2019-11-12T22:59:07
| 2019-11-12T22:59:07
| 138,658,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,646
|
py
|
#
# @lc app=leetcode id=599 lang=python3
#
# [599] Minimum Index Sum of Two Lists
#
# https://leetcode.com/problems/minimum-index-sum-of-two-lists/description/
#
# algorithms
# Easy (49.05%)
# Total Accepted: 69.6K
# Total Submissions: 141.9K
# Testcase Example: '["Shogun","Tapioca Express","Burger King","KFC"]\n' +
# '["Piatti","The Grill at Torrey Pines","Hungry Hunter Steakhouse","Shogun"]'
#
#
# Suppose Andy and Doris want to choose a restaurant for dinner, and they both
# have a list of favorite restaurants represented by strings.
#
#
# You need to help them find out their common interest with the least list
# index sum. If there is a choice tie between answers, output all of them with
# no order requirement. You could assume there always exists an answer.
#
#
#
# Example 1:
#
# Input:
# ["Shogun", "Tapioca Express", "Burger King", "KFC"]
# ["Piatti", "The Grill at Torrey Pines", "Hungry Hunter Steakhouse", "Shogun"]
# Output: ["Shogun"]
# Explanation: The only restaurant they both like is "Shogun".
#
#
#
# Example 2:
#
# Input:
# ["Shogun", "Tapioca Express", "Burger King", "KFC"]
# ["KFC", "Shogun", "Burger King"]
# Output: ["Shogun"]
# Explanation: The restaurant they both like and have the least index sum is
# "Shogun" with index sum 1 (0+1).
#
#
#
#
# Note:
#
# The length of both lists will be in the range of [1, 1000].
# The length of strings in both lists will be in the range of [1, 30].
# The index is starting from 0 to the list length minus 1.
# No duplicates in both lists.
#
#
#
from typing import List


class Solution:
    def findRestaurant(self, list1: List[str], list2: List[str]) -> List[str]:
        # Map each restaurant in list1 to its index for O(1) lookups.
        index = {name: i for i, name in enumerate(list1)}
        best_sum = len(list1) + len(list2)  # larger than any real index sum
        result = []
        for j, name in enumerate(list2):
            if name in index:
                pair_sum = index[name] + j
                if pair_sum < best_sum:
                    best_sum, result = pair_sum, [name]
                elif pair_sum == best_sum:
                    result.append(name)
        return result
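
# Quick sanity check of the completed solution above (hypothetical driver;
# LeetCode normally supplies its own harness):
if __name__ == "__main__":
    s = Solution()
    assert s.findRestaurant(
        ["Shogun", "Tapioca Express", "Burger King", "KFC"],
        ["KFC", "Shogun", "Burger King"]) == ["Shogun"]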
|
[
"liseyko@gmail.com"
] |
liseyko@gmail.com
|
33f997ce05ea563ef525d2f8526b25d76942c1fa
|
201335e99ac66a1e404bda38c3ca0fe1006835ce
|
/network_model/builder/pytorch_builder.py
|
f79ef81e35cfc7bea464818473a06e9cb57bd13e
|
[
"MIT"
] |
permissive
|
yuga-n/ModelLearner
|
507c701cb5beea30e096a51c2ae1296cdc699f8b
|
3193efd5eb15172ba8231a34829942040fcb0fc5
|
refs/heads/main
| 2023-08-14T04:03:23.338993
| 2021-09-10T14:15:30
| 2021-09-10T14:15:30
| 406,409,911
| 0
| 0
|
MIT
| 2021-09-14T14:52:28
| 2021-09-14T14:52:27
| null |
UTF-8
|
Python
| false
| false
| 5,276
|
py
|
# -*- coding: utf-8 -*-
import keras.engine.training
from typing import Callable
from typing import Tuple
from typing import List
from typing import Union
from util_types import types_of_loco
from network_model.distillation.distillation_model_builder import DistllationModelIncubator
from network_model.build_model import builder_pt, builder_with_merge
from keras.callbacks import Callback
import torch
from torch.optim.optimizer import Optimizer
from torch.optim import SGD
from torch.nn.modules.loss import _Loss
from torch.nn import CrossEntropyLoss, Module
from network_model.wrapper.pytorch.model_pt import ModelForPytorch
from model_merger.pytorch.proc.distance.calculator import L1Norm
from model_merger.pytorch.proc.distance.abs_calculator import AbstractDistanceCaluclator
from model_merger.pytorch.proc.loss.calculator import AAEUMLoss
from model_merger.pytorch.proc.loss.abstract_calculator import AbstractLossCalculator
from model_merger.pytorch.proc.shiamese_loss import SiameseLoss, SiameseLossForInceptionV3
from model_merger.pytorch.siamese import SiameseNetworkPT
ModelBuilderResult = Union[keras.engine.training.Model, List[Callback]]
ModelBuilder = Union[Callable[[int], ModelBuilderResult],
Callable[[Union[str, Tuple[str, str]]], keras.engine.training.Model],
DistllationModelIncubator]
OptimizerBuilder = Callable[[Module], Optimizer]
def optimizer_builder(optimizer, **kwargs):
def build(base_model: Module):
kwargs["params"] = base_model.parameters()
return optimizer(**kwargs)
return build
default_optimizer_builder = optimizer_builder(SGD)
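# Example (assumed usage, not from the original repo): an Adam-based factory;
# build() injects the model's parameters as the "params" keyword argument.
# adam_optimizer_builder = optimizer_builder(torch.optim.Adam, lr=1e-3)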
class PytorchModelBuilder(object):
def __init__(self,
img_size: types_of_loco.input_img_size = 28,
channels: int = 3,
model_name: str = "model1",
opt_builder: OptimizerBuilder = default_optimizer_builder,
loss: _Loss = None,
decide_dataset_generator=None,
nearest_data_ave_num=1,
will_calc_rate_real_data_train=False):
self.__img_size = img_size
self.__channels = channels
self.__model_name = model_name
self.__opt_builder = opt_builder
self.__loss = loss
self.__decide_dataset_generator = decide_dataset_generator
self.__nearest_data_ave_num = nearest_data_ave_num
self.__will_calc_rate_real_data_train = will_calc_rate_real_data_train
def build_raw_model(self, model_builder_input) -> torch.nn.Module:
if self.__model_name == "tempload":
return torch.jit.load(model_builder_input)
return builder_pt(model_builder_input, self.__img_size, self.__model_name)
def build_model_builder_wrapper(self, model_builder_input):
base_model = self.build_raw_model(model_builder_input)
optimizer = self.__opt_builder(base_model)
return ModelForPytorch.build_wrapper(base_model,
optimizer,
self.__loss,
decide_dataset_generator=self.__decide_dataset_generator,
nearest_data_ave_num=self.__nearest_data_ave_num,
will_calc_rate_real_data_train=self.__will_calc_rate_real_data_train)
def __call__(self, model_builder_input):
return self.build_model_builder_wrapper(model_builder_input)
class PytorchSiameseModelBuilder(PytorchModelBuilder):
def __init__(self,
q: float,
img_size: types_of_loco.input_img_size = 28,
channels: int = 3,
model_name: str = "model1",
opt_builder: OptimizerBuilder = default_optimizer_builder,
loss_calculator: AbstractLossCalculator = None,
calc_distance: AbstractDistanceCaluclator=L1Norm(),
is_inceptionv3: bool = False,
decide_dataset_generator=None,
nearest_data_ave_num=1,
will_calc_rate_real_data_train=False):
use_loss_calculator = AAEUMLoss(q) if loss_calculator is None else loss_calculator
loss = SiameseLossForInceptionV3(calc_distance, use_loss_calculator) if is_inceptionv3 else SiameseLoss(calc_distance, use_loss_calculator)
super(PytorchSiameseModelBuilder, self).__init__(img_size,
channels,
model_name,
opt_builder,
loss,
decide_dataset_generator,
nearest_data_ave_num,
will_calc_rate_real_data_train
)
def build_raw_model(self, model_builder_input) -> torch.nn.Module:
original_model = super(PytorchSiameseModelBuilder, self).build_raw_model(model_builder_input)
return SiameseNetworkPT(original_model)
|
[
"hamakaze181and189amarube@gmail.com"
] |
hamakaze181and189amarube@gmail.com
|
787c0af6845645273f03f517cdc63b368ff78526
|
00820b522cc16bf996f1ef44a94a2f31989c4065
|
/abc/abc151/b.py
|
83c9ebfb2c76f96625755c32b514c9fb111b83c2
|
[] |
no_license
|
yamato1992/at_coder
|
6dffd425163a37a04e37507743a15f67b29239fc
|
6e0ec47267ed3cae62aebdd3d149f6191fdcae27
|
refs/heads/master
| 2020-08-31T11:17:03.500616
| 2020-06-12T15:45:58
| 2020-06-12T15:45:58
| 218,678,043
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
N, K, M = map(int, input().split())
scores = list(map(int, input().split()))
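# To average at least M over all N exams, the final exam must contribute
# N*M - sum(scores); that is impossible if it exceeds the cap K, and a score
# of 0 already suffices if it is non-positive.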
req_score = N * M - sum(scores)
if req_score > K:
print(-1)
elif req_score < 0:
print(0)
else:
print(req_score)
|
[
"yamato.mitsui.orleans@gmail.com"
] |
yamato.mitsui.orleans@gmail.com
|
00fd44fd4c9944de27295296d9220003e0054ebc
|
1fb2da0e6f73652f0b0126c82a84562f6a8d3535
|
/946. Validate Stack Sequences.py
|
0e3b5ae404154d83fdb8c28dd8ada0394f1e5dfd
|
[] |
no_license
|
JaylenZhang19/Leetcode
|
be3456fcb45270c8aad797f965f4c7a1781c0e61
|
178546686aa3ae8f5da1ae845417f86fab9a644d
|
refs/heads/master
| 2023-02-27T06:08:58.818435
| 2021-01-31T20:28:10
| 2021-01-31T20:28:10
| 287,661,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
from typing import List


class Solution:
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
stack = []
while pushed:
stack.append(pushed.pop(0))
while stack and popped and stack[-1] == popped[0]:
stack.pop()
popped.pop(0)
if stack:
return False
return True
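
# Illustrative check (hypothetical driver, not part of the submission):
# [4,5,3,2,1] is a valid pop order for pushes [1..5]; [4,3,5,1,2] is not.
if __name__ == "__main__":
    s = Solution()
    assert s.validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1])
    assert not s.validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2])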
|
[
"noreply@github.com"
] |
JaylenZhang19.noreply@github.com
|
badf4d8ee3241875395d8cab7be4c5abe4aae39e
|
cbdef2e8ed259adc4653ade34db12d8bcc0cea9f
|
/dominion/cards/Card_Capital_City.py
|
8fd93365fb68c55deab0a00b7b76f4a9f427e947
|
[] |
no_license
|
dwagon/pydominion
|
8dd5afef8ec89c63ade74c4ae6c7473cd676799f
|
545709f0a41529de74f33aa83b106c456900fa5b
|
refs/heads/main
| 2023-08-29T10:02:26.652032
| 2023-08-23T02:25:00
| 2023-08-23T02:25:00
| 18,776,204
| 1
| 0
| null | 2023-08-23T02:25:02
| 2014-04-14T20:49:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,191
|
py
|
#!/usr/bin/env python
import unittest
from dominion import Game, Card, Piles
###############################################################################
class Card_Capital_City(Card.Card):
def __init__(self):
Card.Card.__init__(self)
self.cardtype = Card.CardType.ACTION
self.base = Card.CardExpansion.ALLIES
self.cards = 1
self.actions = 2
self.name = "Capital City"
self.desc = """+1 Card; +2 Actions; You may discard 2 cards for +$2.;
You may pay $2 for +2 Cards."""
self.cost = 5
def special(self, game, player):
ch1 = player.plr_choose_options(
"Discard 2 cards to gain $2 coin?",
("Do nothing", False),
("Discard 2 Cards", True),
)
if ch1:
discard = player.plr_discard_cards(num=2)
if len(discard) == 2:
player.coins.add(2)
if player.coins.get() >= 2:
ch2 = player.plr_choose_options(
"Pay $2 to gain 2 cards?",
("Do nothing", False),
("Gain 2 Cards", True),
)
if ch2:
player.coins.add(-2)
player.pickup_cards(2)
###############################################################################
class Test_Capital_City(unittest.TestCase):
def setUp(self):
self.g = Game.TestGame(numplayers=1, initcards=["Capital City"])
self.g.start_game()
self.plr = self.g.player_list()[0]
self.card = self.g["Capital City"].remove()
def test_play(self):
"""Play the card"""
self.plr.piles[Piles.HAND].set("Copper", "Copper", "Estate", "Duchy")
self.plr.piles[Piles.DECK].set("Gold", "Silver", "Copper", "Copper")
self.plr.add_card(self.card, Piles.HAND)
self.plr.test_input = [
"Discard",
"Discard Estate",
"Discard Duchy",
"Finish",
"Gain",
]
self.plr.play_card(self.card)
self.assertEqual(self.plr.coins.get(), 0)
self.assertEqual(self.plr.piles[Piles.HAND].size(), 4 + 1 - 2 + 2)
self.assertNotIn("Duchy", self.plr.piles[Piles.HAND])
self.assertIn("Silver", self.plr.piles[Piles.HAND])
def test_play_no_pickup(self):
"""Play the card but don't pickup new cards"""
self.plr.piles[Piles.HAND].set("Copper", "Copper", "Estate", "Duchy")
self.plr.piles[Piles.DECK].set("Gold", "Silver", "Copper", "Copper")
self.plr.add_card(self.card, Piles.HAND)
self.plr.test_input = [
"Discard",
"Discard Estate",
"Discard Duchy",
"Finish",
"nothing",
]
self.plr.play_card(self.card)
self.assertEqual(self.plr.coins.get(), 2)
self.assertEqual(self.plr.piles[Piles.HAND].size(), 4 + 1 - 2)
self.assertNotIn("Duchy", self.plr.piles[Piles.HAND])
self.assertNotIn("Silver", self.plr.piles[Piles.HAND])
###############################################################################
if __name__ == "__main__": # pragma: no cover
unittest.main()
# EOF
|
[
"dougal.scott@gmail.com"
] |
dougal.scott@gmail.com
|
31426e105b712e14f2356ac3431be2d91963cd4c
|
2ef009eaa4cc0a6a6d1aee6794f43d8409c99711
|
/python和linux高级编程阶段/05-多任务-协程/04-协程-greenlet.py
|
7df153bb2e4c8366df336493e6ef2e926ae83870
|
[] |
no_license
|
vkhaibao/PyProject
|
6ae833cef09d7000af00dd7c842d2db29a1cd0cc
|
2a733b34f337d4497051660f473a4cfb977fc15b
|
refs/heads/master
| 2022-11-22T07:41:22.630002
| 2019-07-29T01:17:17
| 2019-07-29T01:17:17
| 173,897,429
| 1
| 3
| null | 2020-07-22T23:14:22
| 2019-03-05T07:32:41
|
Python
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
# coding=utf8
from greenlet import greenlet
import time
import os
def test1():
while True:
print("%s" % os.getpid())
print("%s" % os.getppid())
print("---A--")
gr2.switch()
time.sleep(0.5)
def test2():
while True:
print("%s" % os.getpid())
print("%s" % os.getppid())
print("---B--")
gr1.switch()
time.sleep(0.5)
gr1 = greenlet(test1)
gr2 = greenlet(test2)
# 切换到gr1中运行
gr1.switch()
|
[
"280764069@qq.com"
] |
280764069@qq.com
|
eed197eb1c8885c234bee2ca408f919a4654981f
|
43ae032297b492fbdf2df478588d2367f59d0b6b
|
/3 - Types/3.3 - InbuiltTypes-DictionarySetArray/10-dictions-methods-setdefault.py
|
9a824b12c8c37a47ca2b3db55ee9176d4dbd8697
|
[] |
no_license
|
thippeswamydm/python
|
59fa4dbb2899894de5481cb1dd4716040733c378
|
db03b49eb531e75b9f738cf77399a9813d16166b
|
refs/heads/master
| 2020-07-05T06:57:18.575099
| 2019-10-23T04:30:27
| 2019-10-23T04:30:27
| 202,562,414
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
# Describes the assigning, working, and method usages of dictionaries
obj = {'name': 'Ganesh', 'age': 5}
# Add a key value pair
if 'color' not in obj:
obj['color'] = 'light-brown'
obj = {'name': 'Ganesh', 'age': 5}
# Using setdefault function
obj.setdefault('color', 'light-brown')
# 'light-brown'
print(obj)
# {'color': 'light-brown', 'age': 5, 'name': 'Ganesh'}
obj.setdefault('color', 'white')
# 'light-brown'
print(obj)
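
# A common follow-on use of setdefault (illustrative): tallying characters
# without having to pre-initialise each key.
message = 'It was a bright cold day in April'
count = {}
for character in message:
    count.setdefault(character, 0)
    count[character] += 1
print(count)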
|
[
"ganeshsurfs@gmail.com"
] |
ganeshsurfs@gmail.com
|
e74bd9c8df9754782584f12a29542e54c31d5b05
|
210ecd63113ce90c5f09bc2b09db3e80ff98117a
|
/AbletonLive9_RemoteScripts/_Framework/ControlSurfaceComponent.py
|
f03b0d3222874fbb3521aac557d4e52d63bdf4e6
|
[] |
no_license
|
ajasver/MidiScripts
|
86a765b8568657633305541c46ccc1fd1ea34501
|
f727a2e63c95a9c5e980a0738deb0049363ba536
|
refs/heads/master
| 2021-01-13T02:03:55.078132
| 2015-07-16T18:27:30
| 2015-07-16T18:27:30
| 38,516,112
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,835
|
py
|
#Embedded file name: /Users/versonator/Jenkins/live/Binary/Core_Release_64_static/midi-remote-scripts/_Framework/ControlSurfaceComponent.py
from __future__ import absolute_import
import Live
from . import Task
from .Control import ControlManager
from .Dependency import dependency, depends
from .SubjectSlot import Subject
from .Util import lazy_attribute
class ControlSurfaceComponent(ControlManager, Subject):
"""
Base class for all classes encapsulating functions in Live
"""
name = ''
canonical_parent = None
is_private = False
_show_msg_callback = dependency(show_message=None)
_has_task_group = False
_layer = None
@depends(register_component=None, song=None)
def __init__(self, name = '', register_component = None, song = None, layer = None, is_enabled = True, is_root = False, *a, **k):
        assert callable(register_component)
        super(ControlSurfaceComponent, self).__init__(*a, **k)
        self.name = name
        assert layer is None or not is_enabled
        self._explicit_is_enabled = is_enabled
        self._recursive_is_enabled = True
        self._is_enabled = self._explicit_is_enabled
        self._is_root = is_root
        self._allow_updates = True
        self._update_requests = 0
        self._song = song
        self._layer = layer
register_component(self)
def disconnect(self):
if self._has_task_group:
self._tasks.kill()
self._tasks.clear()
super(ControlSurfaceComponent, self).disconnect()
@property
def is_root(self):
return self._is_root
    def _internal_on_enabled_changed(self):
        if self._layer:
            if self.is_enabled():
                grabbed = self._layer.grab(self)
                if not grabbed:
                    raise AssertionError('Only one component can use a layer at a time')
            else:
                self._layer.release(self)
        if self._has_task_group:
            if self.is_enabled():
                self._tasks.resume()
            else:
                self._tasks.pause()
def on_enabled_changed(self):
self.update()
def update_all(self):
self.update()
def set_enabled(self, enable):
self._explicit_is_enabled = bool(enable)
self._update_is_enabled()
def _set_enabled_recursive(self, enable):
self._recursive_is_enabled = bool(enable)
self._update_is_enabled()
    def _update_is_enabled(self):
        is_enabled = self._recursive_is_enabled and self._explicit_is_enabled
        if is_enabled != self._is_enabled:
            self._is_enabled = is_enabled
            self._internal_on_enabled_changed()
            self.on_enabled_changed()
def set_allow_update(self, allow_updates):
allow = bool(allow_updates)
if self._allow_updates != allow:
self._allow_updates = allow
if self._allow_updates and self._update_requests > 0:
self._update_requests = 0
self.update()
def control_notifications_enabled(self):
return self.is_enabled()
def application(self):
return Live.Application.get_application()
def song(self):
return self._song
@lazy_attribute
@depends(parent_task_group=None)
def _tasks(self, parent_task_group = None):
tasks = parent_task_group.add(Task.TaskGroup())
if not self._is_enabled:
tasks.pause()
self._has_task_group = True
return tasks
def _get_layer(self):
return self._layer
    def _set_layer(self, new_layer):
        if self._layer != new_layer:
            if self._layer:
                self._layer.release(self)
            self._layer = new_layer
            if new_layer and self.is_enabled():
                grabbed = new_layer.grab(self)
                if not grabbed:
                    raise AssertionError('Only one component can use a layer at a time')
layer = property(_get_layer, _set_layer)
def is_enabled(self, explicit = False):
"""
Returns whether the component is enabled.
If 'explicit' is True the parent state is ignored.
"""
return self._is_enabled if not explicit else self._explicit_is_enabled
def on_track_list_changed(self):
"""
Called by the control surface if tracks are added/removed,
to be overridden
"""
pass
def on_scene_list_changed(self):
"""
Called by the control surface if scenes are added/removed, to
be overridden
"""
pass
def on_selected_track_changed(self):
"""
Called by the control surface when a track is selected, to be
overridden
"""
pass
def on_selected_scene_changed(self):
"""
Called by the control surface when a scene is selected, to be
overridden
"""
pass
@depends(parent_task_group=None)
def _register_timer_callback(self, callback, parent_task_group = None):
"""
DEPRECATED. Use tasks instead
"""
        assert callable(callback)
        assert parent_task_group.find(callback) is None
def wrapper(delta):
callback()
return Task.RUNNING
parent_task_group.add(Task.FuncTask(wrapper, callback))
@depends(parent_task_group=None)
def _unregister_timer_callback(self, callback, parent_task_group = None):
"""
DEPRECATED. Use tasks instead
"""
        assert callable(callback)
        task = parent_task_group.find(callback)
        assert task is not None
parent_task_group.remove(task)
|
[
"admin@scoopler.com"
] |
admin@scoopler.com
|
c39858057a3901c9d072ff45806c19924868a44f
|
c4e3a57511eb7a39425847bdcd38a2207e560a13
|
/Algorithm/1026_Maximum_Difference_Between_NodeAncestor.py
|
7d5e3de66de692beca64df48a2fddd569d79d7de
|
[] |
no_license
|
Gi1ia/TechNoteBook
|
57af562b78278b7f937b906d1154b19f2c077ebd
|
1a3c1f4d6e9d3444039f087763b93241f4ba7892
|
refs/heads/master
| 2021-06-03T02:31:24.986063
| 2020-07-16T22:25:56
| 2020-07-16T22:25:56
| 141,761,958
| 7
| 1
| null | 2018-11-05T01:09:46
| 2018-07-20T22:06:12
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,042
|
py
|
"""Given the root of a binary tree,
find the maximum value V for which there exists different nodes A and B where
V = |A.val - B.val| and A is an ancestor of B.
(A node A is an ancestor of B if either: any child of A is equal to B,
or any child of A is an ancestor of B.)
Input: [8,3,10,1,6,null,14,null,null,4,7,13]
Output: 7
Explanation:
We have various ancestor-node differences, some of which are given below :
|8 - 3| = 5
|3 - 7| = 4
|8 - 1| = 7
|10 - 13| = 3
Among all possible differences, the maximum value of 7 is obtained by |8 - 1| = 7.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def maxAncestorDiff(self, root: TreeNode) -> int:
""" Solution I
        The stack-based solution has much better performance:
        each stack entry carries the running max and min along the path.
"""
res = 0
stack = [[root, root.val, root.val]] # node, max, min
while stack:
temp, cur_mx, cur_mn = stack.pop()
if temp.val > cur_mx:
cur_mx = temp.val
if temp.val < cur_mn:
cur_mn = temp.val
if cur_mx - cur_mn > res:
res = cur_mx - cur_mn
if temp.left:
stack.append([temp.left, cur_mx, cur_mn])
if temp.right:
stack.append([temp.right, cur_mx, cur_mn])
return res
def maxAncestorDiff_dfs(self, root: TreeNode) -> int:
"""Solution II
        The DFS solution is cleaner and more straightforward.
"""
return self.dfs(root, root.val, root.val)
def dfs(self, root, min_val, max_val):
if not root:
return max_val - min_val
max_val = max(max_val, root.val)
min_val = min(min_val, root.val)
return max(self.dfs(root.left, min_val, max_val), self.dfs(root.right, min_val, max_val))
|
[
"41492334+Gi1ia@users.noreply.github.com"
] |
41492334+Gi1ia@users.noreply.github.com
|
3de59b86a04b9a0c4689617def82d2aab258a76b
|
b586cec578da0e1904d07468a7f49dacc0af5e99
|
/chapter_4/util/Checker.py
|
497c6bddf812359153f10ce45072330700e03e0d
|
[
"MIT"
] |
permissive
|
LifeOfGame/mongodb_redis
|
bf21b989eeb95eeb39f684363f9436677252a63e
|
834fbdd65d4ea9e1e0056b711781e5f27a40333b
|
refs/heads/master
| 2021-06-22T17:01:19.497132
| 2019-08-20T06:54:21
| 2019-08-20T06:54:21
| 203,295,895
| 0
| 0
|
MIT
| 2021-03-20T01:37:02
| 2019-08-20T03:53:06
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
import re
class Checker(object):
FIELD_LIST = {'name', 'age', 'birthday', 'origin_home', 'current_home'}
def check_add_fields_exists(self, dict_tobe_inserted):
if not dict_tobe_inserted:
return False
return self.FIELD_LIST == set(dict_tobe_inserted.keys())
def check_update_fields_exists(self, dict_tobe_inserted):
if 'people_id' not in dict_tobe_inserted:
return False
return self.check_add_fields_exists(dict_tobe_inserted.get('updated_info', {}))
def check_value_valid(self, dict_tobe_inserted):
name = dict_tobe_inserted['name']
        if not name:
            return 'Name must not be empty'
        age = dict_tobe_inserted['age']
        if not isinstance(age, int) or age < 0 or age > 120:
            return 'Age must be an integer between 0 and 120'
        birthday = dict_tobe_inserted['birthday']
        if not re.match(r'\d{4}-\d{2}-\d{2}', birthday):
            return 'Birthday must be in the format yyyy-mm-dd'
def transfer_people_id(self, people_id):
if isinstance(people_id, int):
return people_id
try:
people_id = int(people_id)
return people_id
except ValueError:
return -1
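# A minimal usage sketch for the class above (the sample record below is
# hypothetical, not part of the original module):
if __name__ == '__main__':
    checker = Checker()
    record = {
        'name': 'Alice',
        'age': 30,
        'birthday': '1990-01-01',
        'origin_home': 'Beijing',
        'current_home': 'Shanghai',
    }
    print(checker.check_add_fields_exists(record))  # True: all fields present
    print(checker.check_value_valid(record))        # None: every value is valid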
|
[
"greensouth@foxmail.com"
] |
greensouth@foxmail.com
|
55e6472b2b061c94777f53c74adbd2c5e99bbe6c
|
ae8a1631f1b0da3cbe7a61cc6ad8c4839d3017e2
|
/experiments/experiments_toy/grid_search/run_grid_search_bnmtf_gibbs.py
|
c2d9d99fbb2d5384a7f33b6803e3ced4b49aef4a
|
[
"Apache-2.0"
] |
permissive
|
hansaimlim/BNMTF
|
ce3a5734feed209d284d98b5db508f944781c880
|
9cf8ad6475dac5dc7ece9d6dffb7f6f59a71ac18
|
refs/heads/master
| 2021-01-19T18:47:41.870310
| 2017-02-08T16:26:39
| 2017-02-08T16:26:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,030
|
py
|
"""
Run the grid search method for finding the best values for K and L for BNMTF.
We use the parameters for the true priors.
For BNMTF I find that the BIC is a better estimator - the log likelihood is
high for higher values for K and L than the true ones, same for the AIC. With
the BIC we get a nice peak just below the true K and L (for true K=L=5, at K=L=4).
"""
project_location = "/Users/thomasbrouwer/Documents/Projects/libraries/"
import sys
sys.path.append(project_location)
from BNMTF.data_toy.bnmtf.generate_bnmtf import generate_dataset, try_generate_M
from BNMTF.code.cross_validation.grid_search_bnmtf import GridSearch
from BNMTF.code.models.bnmtf_gibbs_optimised import bnmtf_gibbs_optimised
import numpy, matplotlib.pyplot as plt
import scipy.interpolate
##########
restarts = 5
iterations = 100
burn_in = 90
thinning = 1
I, J = 20,20
true_K, true_L = 3,3
values_K, values_L = range(1,4+1), range(1,4+1)
fraction_unknown = 0.1
attempts_M = 100
alpha, beta = 100., 1. #1., 1.
tau = alpha / beta
lambdaF = numpy.ones((I,true_K))
lambdaS = numpy.ones((true_K,true_L))
lambdaG = numpy.ones((J,true_L))
classifier = bnmtf_gibbs_optimised
initFG = 'kmeans'
initS = 'random'
# Generate data
(_,_,_,_,_,R) = generate_dataset(I,J,true_K,true_L,lambdaF,lambdaS,lambdaG,tau)
M = try_generate_M(I,J,fraction_unknown,attempts_M)
# Run the grid search. The priors lambdaF,S,G need to be a single value (recall K,L is unknown)
priors = { 'alpha':alpha, 'beta':beta, 'lambdaF':lambdaF[0,0], 'lambdaS':lambdaS[0,0], 'lambdaG':lambdaG[0,0] }
grid_search = GridSearch(classifier,values_K,values_L,R,M,priors,initS,initFG,iterations,restarts)
grid_search.search(burn_in,thinning)
# Plot the performances of all three metrics
for metric in ['loglikelihood', 'BIC', 'AIC','MSE']:
# Make three lists of indices X,Y,Z (K,L,metric)
values = numpy.array(grid_search.all_values(metric)).flatten()
list_values_K = numpy.array([values_K for l in range(0,len(values_L))]).T.flatten()
list_values_L = numpy.array([values_L for k in range(0,len(values_K))]).flatten()
# Set up a regular grid of interpolation points
Ki, Li = (numpy.linspace(min(list_values_K), max(list_values_K), 100),
numpy.linspace(min(list_values_L), max(list_values_L), 100))
Ki, Li = numpy.meshgrid(Ki, Li)
# Interpolate
rbf = scipy.interpolate.Rbf(list_values_K, list_values_L, values, function='linear')
values_i = rbf(Ki, Li)
# Plot
plt.figure()
plt.imshow(values_i, cmap='jet_r',
vmin=min(values), vmax=max(values), origin='lower',
extent=[min(values_K), max(values_K), min(values_L), max(values_L)])
plt.scatter(list_values_K, list_values_L, c=values, cmap='jet_r')
plt.colorbar()
plt.title("Metric: %s." % metric)
plt.xlabel("K")
plt.ylabel("L")
plt.show()
# Print the best value
best_K,best_L = grid_search.best_value(metric)
print "Best K,L for metric %s: %s,%s." % (metric,best_K,best_L)
|
[
"tab43@cam.ac.uk"
] |
tab43@cam.ac.uk
|
2a4f4d986db59052b27d1c83c4126eaa341aae86
|
95c71453ed6cc6f9b94f38a3c1655680618d71a4
|
/kickstart/2019/RoundB/C/C-solve.py
|
c5fc44ebd49349f2f21430820232d8ea2d77f83c
|
[] |
no_license
|
ZX1209/gl-algorithm-practise
|
95f4d6627c1dbaf2b70be90149d897f003f9cb3a
|
dd0a1c92414e12d82053c3df981897e975063bb8
|
refs/heads/master
| 2020-05-16T14:56:34.568878
| 2019-12-27T07:37:11
| 2019-12-27T07:37:11
| 183,116,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 991
|
py
|
import logging
logging.basicConfig(level=logging.INFO)
from collections import Counter
def Count(i, j, counters, S):
ans = 0
tmpCounter = counters[j + 1] - counters[i]
for v in tmpCounter.values():
if v <= S:
ans += v
return ans
def main():
T = int(input())
for t in range(T):
answer = 0
N, S = [int(c) for c in input().split()]
A = input().split()
logging.debug((N, S, A))
tmpCounter = Counter()
counters = [tmpCounter.copy()]
for i in range(len(A)):
tmpCounter.update([A[i]])
counters.append(tmpCounter.copy())
tmpMax = 0
for i in range(len(A)):
for j in range(i + 1, len(A)):
tmp = Count(i, j, counters, S)
# logging.debug(tmp)
if tmp > tmpMax:
tmpMax = tmp
print("Case #" + str(t + 1) + ": " + str(tmpMax))
if __name__ == '__main__':
main()
|
[
"1404919041@qq.com"
] |
1404919041@qq.com
|
1e7fd7a6ddd8dfbb09be12e0e07a753cc3d0c789
|
a6155458f58f2e40e2583557cf807eda52a0013b
|
/catalog/project.py
|
1dd22dfbdb35631cbd11ea846986af4f79fa6d8b
|
[] |
no_license
|
georgeplusplus-ZZ/udacity-project-2
|
ab6c80052cc601508743fd5003ae5d09103d8fbb
|
5442f1f99808af2f8663d59fdbd02be7dd7e425a
|
refs/heads/master
| 2021-10-26T02:47:28.841918
| 2019-04-10T01:52:40
| 2019-04-10T01:52:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,782
|
py
|
from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from flask import url_for
from flask import jsonify
from flask import flash
from flask import abort
from flask import session as login_session
from sqlalchemy import create_engine, desc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Attraction
import os
import random
import string
# Globals
app = Flask(__name__, static_url_path='/static')
engine = create_engine('sqlite:///nycattractions.db', connect_args={'check_same_thread':False})
Base.metadata.bind = engine
DBSession = sessionmaker(bind= engine)
session = DBSession()
@app.route('/')
@app.route('/home')
def homepageContent():
items = session.query(Attraction).order_by(Attraction.created_at).limit(5).all()
return render_template('home.html', items= items)
# Create anti-forgery state token
@app.route('/login')
def showLogin():
state = ''.join(
random.choice(string.ascii_uppercase + string.digits) for x in range(32))
login_session['state'] = state
# return "The current session state is %s" % login_session['state']
return render_template('login.html', STATE=state)
@app.route('/view/<string:attraction_type>')
def attractionContent(attraction_type):
attractions = session.query(Attraction).filter(Attraction.category.has(name= attraction_type.lower())).all()
if not len(attractions):
abort(404)
return render_template('attractions.html', attractions= attractions)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host = '0.0.0.0', port = 5000)
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
73737b1228d0aa26332236f0bed3f9f92b2e599c
|
47175228ce25812549eb5203fc8b86b76fec6eb9
|
/API_scripts/dfp/dfp_python3/v201502/custom_targeting_service/create_custom_targeting_keys_and_values.py
|
4217821f245fa2d62bbcf1f3aa0132710d6031de
|
[] |
no_license
|
noelleli/documentation
|
c1efe9c2bdb169baa771e9c23d8f4e2683c2fe20
|
a375698b4cf0776d52d3a9d3c17d20143bd252e1
|
refs/heads/master
| 2021-01-10T05:41:30.648343
| 2016-02-13T05:46:31
| 2016-02-13T05:46:31
| 51,477,460
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,204
|
py
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates new custom targeting keys and values.
To determine which custom targeting keys and values exist, run
get_all_custom_targeting_keys_and_values.py. To target these custom targeting
keys and values, run target_custom_criteria_example.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CustomTargetingService.createCustomTargetingKeys
CustomTargetingService.createCustomTargetingValues
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
custom_targeting_service = client.GetService(
'CustomTargetingService', version='v201502')
# Create custom targeting key objects.
keys = [
{
'displayName': 'gender',
'name': 'g',
'type': 'PREDEFINED'
},
{
'displayName': 'car model',
'name': 'c',
'type': 'FREEFORM'
},
# Add predefined key that may be use for content targeting.
{
'displayName': 'genre',
'name': 'genre',
'type': 'PREDEFINED'
}
]
# Add custom targeting keys.
keys = custom_targeting_service.createCustomTargetingKeys(keys)
# Display results.
if keys:
for key in keys:
print(('A custom targeting key with id \'%s\', name \'%s\', and display '
'name \'%s\' was created.' % (key['id'], key['name'],
key['displayName'])))
else:
print('No keys were created.')
# Create custom targeting value objects.
values = [
{
          'customTargetingKeyId': keys[0]['id'],
'displayName': 'male',
# Name is set to 1 so that the actual name can be hidden from website
# users.
'name': '1',
'matchType': 'EXACT'
},
{
          'customTargetingKeyId': keys[0]['id'],
'displayName': 'female',
# Name is set to 2 so that the actual name can be hidden from website
# users.
'name': '2',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[1]['id'],
'displayName': 'honda civic',
'name': 'honda civic',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[1]['id'],
'displayName': 'toyota',
'name': 'toyota',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[2]['id'],
'displayName': 'comedy',
'name': 'comedy',
'matchType': 'EXACT'
},
{
'customTargetingKeyId': keys[2]['id'],
'displayName': 'drama',
'name': 'drama',
'matchType': 'EXACT'
}
]
# Add custom targeting values.
values = custom_targeting_service.createCustomTargetingValues(values)
# Display results.
if values:
for value in values:
print(('A custom targeting value with id \'%s\', belonging to key with id'
' \'%s\', name \'%s\', and display name \'%s\' was created.'
% (value['id'], value['customTargetingKeyId'], value['name'],
value['displayName'])))
else:
print('No values were created.')
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
[
"noelle@makermedia.com"
] |
noelle@makermedia.com
|
1e7fe6c92b81c4aea805851ef702489814f31b83
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/289/78183/submittedfiles/testes.py
|
5e9a5aefdf3d8285ef690f3498545979c9f5af98
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
idade = int(input("Digite sua idade: "))
print('A idade do indivíduo é %d' % idade)
altura = float(input("Digite sua altura: "))
print('A altura do indivíduo eh %.2f' % altura)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
9e23792c94cee031550934457cf9eebd418a436d
|
cdcd71b8bb238ae1084e08d8d7c21c3c5595ba5b
|
/warriors.test.py
|
9a3fae86babebf5d3d72be28581a6952cd39558d
|
[] |
no_license
|
linnil1/2020pdsa
|
7118be250286aaa6831a21fd71e9de62d919ca6c
|
772a560bc5ce88eb052e102df7e0437372fd7ac1
|
refs/heads/master
| 2023-03-13T20:10:10.596073
| 2021-03-12T01:52:16
| 2021-03-12T01:52:16
| 295,599,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,088
|
py
|
import sys
import json
from pprint import pprint
from concurrent.futures import ProcessPoolExecutor, as_completed
import numpy as np
from tqdm import tqdm
import imp
Warriors = imp.load_source("Warriors", 'warriors.sol.py').Warriors
def quesion(n):
# init
np.random.seed()
st = np.random.choice(1000, size=n)
rg = np.random.choice(n, size=n) // 2
arg = np.stack([st, rg])
ops = {
'strength': st,
'attack_range': rg,
'answer': Warriors().warriors(st, rg)
}
return ops
def generateQuestion(N, n):
all_ops = []
with ProcessPoolExecutor(max_workers=20) as executor:
ops = [executor.submit(quesion, n)
for _ in range(N)]
for op in as_completed(ops):
all_ops.append(op.result())
return all_ops
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
# cases = json.load(open("warriors.json"))
cases = []
cases.append({
'case': 1,
'score': 20,
'data': [
# 0
{'strength': [11, 13, 11, 7, 15],
'attack_range': [ 1, 8, 1, 7, 2],
'answer': [0, 0, 0, 3, 2, 3, 3, 3, 2, 4]},
# 1
{'strength': [11],
'attack_range': [1],
'answer': [0, 0]},
# 2
{'strength': [11, 15],
'attack_range': [1, 1],
'answer': [0, 0, 0, 1]},
# 3
{'strength': [11, 15],
'attack_range': [1, 1],
'answer': [0, 0, 0, 1]},
# 4
{'strength': [15, 11],
'attack_range': [1, 1],
'answer': [0, 1, 1, 1]},
],
})
for i, arg in enumerate(cases[-1]['data']):
cases[-1]['data'][i]['answer'] = Warriors().warriors(arg['strength'], arg['attack_range'])
cases.append({
'case': 2,
'score': 20,
'data': [
# 0
# increasing
{'strength': np.arange(100000),
'attack_range': np.ones(100000, dtype=np.int) * 1000000},
# 1
# decreasing
{'strength': np.flip(np.arange(100000)),
'attack_range': np.ones(100000, dtype=np.int) * 1000000},
# 2
# increasing + decreasing
{'strength': np.hstack([np.arange(100000), np.flip(np.arange(100000))]),
'attack_range': np.ones(200000, dtype=np.int) * 4000000},
# 3
# decreasing + increasing
{'strength': np.hstack([np.flip(np.arange(100000)), np.arange(100000)]),
'attack_range': np.ones(200000, dtype=np.int) * 4000000},
# 4
# increasing + no attack
{'strength': np.arange(100000),
'attack_range': np.zeros(100000, dtype=np.int)},
# 5
{'strength': [0],
'attack_range': [1],
'answer': [0, 0]},
# 6
{'strength': [0],
'attack_range': [0],
'answer': [0, 0]},
# 7
{'strength': [0, 0],
'attack_range': [0, 0],
'answer': [0, 0, 1, 1]},
# 8
{'strength': [0, 1],
'attack_range': [0, 0],
'answer': [0, 0, 1, 1]},
],
})
for i, arg in enumerate(cases[-1]['data']):
cases[-1]['data'][i]['answer'] = Warriors().warriors(arg['strength'], arg['attack_range'])
# 30 * 30 -> 1000ms
cases.append({
'case': 3,
'score': 20,
'data': generateQuestion(30, 10000),
})
# 2400ms
cases.append({
'case': 4,
'score': 20,
'data': [
quesion(100000),
quesion(200000),
quesion(300000),
quesion(400000),
]
})
# 2400ms
cases.append({
'case': 5,
'score': 20,
'data': [
quesion(1000000),
]
})
# 10000 -> 30ms
# 100000 -> 300ms
# 200000 -> 450ms
# 300000 -> 750ms
# 400000 -> 1000ms
# 500000 -> 1200ms
# 800000 -> 2000ms
# 1000000 -> 2400ms
json.dump(cases, open("warriors.json", "w"), cls=MyEncoder)
# pprint(cases)
|
[
"linnil1.886@gmail.com"
] |
linnil1.886@gmail.com
|
a3a7309b43e957fe90d03cf2bd26952fd25d1a50
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/appconfiguration/v20200701preview/key_value.py
|
0abcd6b95f09374cd0ce483a2ef8107baf4d53a8
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 7,486
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['KeyValue']
class KeyValue(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config_store_name: Optional[pulumi.Input[str]] = None,
content_type: Optional[pulumi.Input[str]] = None,
key_value_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The key-value resource along with all resource properties.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] config_store_name: The name of the configuration store.
:param pulumi.Input[str] content_type: The content type of the key-value's value.
Providing a proper content-type can enable transformations of values when they are retrieved by applications.
:param pulumi.Input[str] key_value_name: Identifier of key and label combination. Key and label are joined by $ character. Label is optional.
:param pulumi.Input[str] resource_group_name: The name of the resource group to which the container registry belongs.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A dictionary of tags that can help identify what a key-value may be applicable for.
:param pulumi.Input[str] value: The value of the key-value.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if config_store_name is None:
raise TypeError("Missing required property 'config_store_name'")
__props__['config_store_name'] = config_store_name
__props__['content_type'] = content_type
if key_value_name is None:
raise TypeError("Missing required property 'key_value_name'")
__props__['key_value_name'] = key_value_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['value'] = value
__props__['e_tag'] = None
__props__['key'] = None
__props__['label'] = None
__props__['last_modified'] = None
__props__['locked'] = None
__props__['name'] = None
__props__['type'] = None
super(KeyValue, __self__).__init__(
'azure-nextgen:appconfiguration/v20200701preview:KeyValue',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'KeyValue':
"""
Get an existing KeyValue resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return KeyValue(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> pulumi.Output[Optional[str]]:
"""
The content type of the key-value's value.
Providing a proper content-type can enable transformations of values when they are retrieved by applications.
"""
return pulumi.get(self, "content_type")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> pulumi.Output[str]:
"""
An ETag indicating the state of a key-value within a configuration store.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
The primary identifier of a key-value.
The key is used in unison with the label to uniquely identify a key-value.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def label(self) -> pulumi.Output[str]:
"""
A value used to group key-values.
The label is used in unison with the key to uniquely identify a key-value.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> pulumi.Output[str]:
"""
The last time a modifying operation was performed on the given key-value.
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter
def locked(self) -> pulumi.Output[bool]:
"""
A value indicating whether the key-value is locked.
A locked key-value may not be modified until it is unlocked.
"""
return pulumi.get(self, "locked")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A dictionary of tags that can help identify what a key-value may be applicable for.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> pulumi.Output[Optional[str]]:
"""
The value of the key-value.
"""
return pulumi.get(self, "value")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"noreply@github.com"
] |
test-wiz-sec.noreply@github.com
|
0e5f9524a6311e8ca3f114cb22491e4aaff80c2b
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-datastream/samples/generated_samples/datastream_v1alpha1_generated_datastream_delete_private_connection_async.py
|
d0a8f20842e7cadf8600231215035ef761fa4c53
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,035
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeletePrivateConnection
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datastream
# [START datastream_v1alpha1_generated_Datastream_DeletePrivateConnection_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datastream_v1alpha1
async def sample_delete_private_connection():
# Create a client
client = datastream_v1alpha1.DatastreamAsyncClient()
# Initialize request argument(s)
request = datastream_v1alpha1.DeletePrivateConnectionRequest(
name="name_value",
)
# Make the request
operation = client.delete_private_connection(request=request)
print("Waiting for operation to complete...")
response = (await operation).result()
# Handle the response
print(response)
# [END datastream_v1alpha1_generated_Datastream_DeletePrivateConnection_async]
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
01324a0b0027287231b714660b00a89d2561d10a
|
a04c9e34c8abb6eb5857cb6e35fbbed0743ea8d4
|
/sample_db_funtions.py
|
aa1286b28047be47190becff32df50450902f654
|
[] |
no_license
|
SrikanthAmudala/PythonWorkShopConcordia
|
a2fd0a3103524733913c00767907bafecd1c6ad6
|
d2e383a89bc995d96313fd0723c064a0a45db6f9
|
refs/heads/master
| 2021-05-19T13:02:42.173832
| 2020-05-27T21:48:34
| 2020-05-27T21:48:34
| 251,713,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
db = {
"user_table": {
"sunny": {
"password": "sunny1234",
"books_reg": {
"COMPUTER SCIENCE": [],
"CIISE": []
}
},
"rohit": {
"password": "rohit",
"books_reg": {
"COMPUTER SCIENCE": [],
"CIISE": []
}
},
"negar": {
"password": "negar",
"books_reg": {
"COMPUTER SCIENCE": [],
"CIISE": []
}
}
},
"books_list": {
"COMPUTER SCIENCE": {
"C": 1, # name of the book: count of the book
"Python": 2,
"java": 1
},
"CIISE": {
"Quality Systems": 1,
"DataMining": 1,
"Project Management": 1
}
}
}
print("before update: \n",db)
username = "negar"
password = "negar"
password_from_db = db.get("user_table").get(username).get("password")
print("True password: ", password_from_db)
if password == password_from_db:
print("Login successful")
else:
print("Login Failed")
check_book = "Python"
book_catg = "COMPUTER SCIENCE"
total_books_available = db.get("books_list").get(book_catg).get(check_book)
if total_books_available > 0:
    print("Book exists and total no of books available: ", total_books_available)
# adding book to the username
db['user_table'][username]['books_reg'][book_catg].append(check_book)
# updating the no of books in the book catg
db['books_list'][book_catg][check_book] = db.get('books_list').get(book_catg).get(check_book) - 1
else:
print("Book out of stock")
print("After update: \n",db)
|
[
"srikanthamudala95@gmail.com"
] |
srikanthamudala95@gmail.com
|
50534e04bd484b27316b647b50f6e6c2f9ee131e
|
ca101e77a77cd86561c4e34553dbd3578a87a8b2
|
/transaction/urls.py
|
01537875c84f1a09282b06841208d3e55e7f4e96
|
[] |
no_license
|
vmgabriel/app-cycle-money
|
001c1baa1b1c77c6e965beaee6d1d7c4cd45c699
|
4381fb9c8288fe37cbcd1c9ecef14e6e8299b680
|
refs/heads/master
| 2023-08-13T19:07:19.065576
| 2020-07-26T22:23:35
| 2020-07-26T22:23:35
| 281,472,685
| 0
| 0
| null | 2021-09-22T19:28:41
| 2020-07-21T18:20:36
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,181
|
py
|
# Develop: vmgabriel
"""Module that Define all Rooute of the module"""
from django.urls import path
from . import views
app_name = 'transactions'
urlpatterns = [
# Priorities Routes
path(
'priorities/',
views.PriorityEntityView.as_view(),
name='priorities_list',
),
path(
'priorities/create/',
views.PriorityEntityView.as_view(),
name='priorities_create'
),
path(
'priorities/<int:id>/edit/',
views.PriorityEntityView.as_view(),
name='priorities_edit'
),
path(
'priorities/<int:id>/delete/',
views.PriorityEntityView.as_view(),
name='priorities_delete'
),
path(
'priorities/<int:id>/show/',
views.PriorityEntityView.as_view(),
name='priorities_show'
),
# Type Consume Routes
path(
'type-consumes/',
views.TypeConsumeEntityView.as_view(),
name='type_consumes_list',
),
path(
'type-consumes/create/',
views.TypeConsumeEntityView.as_view(),
name='type_consumes_create'
),
path(
'type-consumes/<int:id>/edit/',
views.TypeConsumeEntityView.as_view(),
name='type_consumes_edit'
),
path(
'type-consumes/<int:id>/delete/',
views.TypeConsumeEntityView.as_view(),
name='type_consumes_delete'
),
path(
'type-consumes/<int:id>/show/',
views.TypeConsumeEntityView.as_view(),
name='type_consumes_show'
),
# Type Bill Routes
path(
'type-bills/',
views.TypeBillEntityView.as_view(),
name='type_bills_list',
),
path(
'type-bills/create/',
views.TypeBillEntityView.as_view(),
name='type_bills_create'
),
path(
'type-bills/<int:id>/edit/',
views.TypeBillEntityView.as_view(),
name='type_bills_edit'
),
path(
'type-bills/<int:id>/delete/',
views.TypeBillEntityView.as_view(),
name='type_bills_delete'
),
path(
'type-bills/<int:id>/show/',
views.TypeBillEntityView.as_view(),
name='type_bills_show'
),
]
|
[
"vmgabriel96@gmail.com"
] |
vmgabriel96@gmail.com
|
586486eb94a499645daff8e3b832c1c44d56ffb7
|
257b1d32488ff8a3e9b5f148839d042512d5de83
|
/testing/pandas_rollup_plugin_test.py
|
0105128b05ec100dffb941d4e6991c6d74a469be
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hellios78/mldb
|
200ba8b38e2509585d563403c4d4d12543e00dc9
|
5f869dcfca1f8bcce3418138f130321656a0970c
|
refs/heads/master
| 2020-04-06T04:10:38.369452
| 2015-12-23T22:06:08
| 2015-12-23T22:06:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,400
|
py
|
# This file is part of MLDB. Copyright 2015 Datacratic. All rights reserved.
import os, socket, time
#import csv
#import json
#import datetime
from multiprocessing import Process
from py_connectors.mldb_connector import MldbConnectorAdHoc
#from mldb.data import DFrame
def startMldb():
s = socket.socket()
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
def startServer(port):
os.system("build/x86_64/bin/mldb_runner --http-listen-port %d --peer-listen-port %d" % (port, port+1))
proc = Process(target=startServer, args=[port])
proc.start()
time.sleep(1) # give enough time to start
if not proc.is_alive():
raise Exception("Failed to start api in background for test")
return proc, "http://localhost:%d" % port
mldbProc, mldbUrl = startMldb()
mldb = MldbConnectorAdHoc(mldbUrl).v1()
#######
# First we need to register the two plugins we'll be using
# b) the pandas_rollup plugin, which we'll use to do our exploration
######
pluginConfig = {
"type": "pandas_rollup",
"id": "pandas_rollup"
}
print mldb.plugins("pandas_rollup").put(pluginConfig, [("sync", "true")])
####
# Let's now create a script that we'll ship over and that will be executed
# on the server to create the dataset and import the data
####
scriptSource = """
import json
from datetime import datetime
print "Running a server-side script!!!"
# create a mutable beh dataset
datasetConfig = {
"type": "beh.mutable",
"id": "tng",
"address": "tng_py.beh.gz"
}
dataset = plugin.create_dataset(datasetConfig)
dataset.recordRow("picard", [["setscourse", "earth", datetime.fromtimestamp(1000)],
["setscourse", "neutralzone", datetime.fromtimestamp((10000))],
["setscourse", "neutralzone", datetime.fromtimestamp((20000))],
["setscourse", "neutralzone", datetime.fromtimestamp((30000))],
["setscourse", "neutralzone", datetime.fromtimestamp((4000))],
["setscourse", "wolf359", datetime.fromtimestamp((50000))]])
dataset.recordRow("riker", [["setscourse", "risa", datetime.fromtimestamp((500000))],
["fireon", "cardasians", datetime.fromtimestamp((500000))]])
dataset.recordRow("worf", [["fireon", "tardis", datetime.fromtimestamp((400000))],
["fireon", "borgcube", datetime.fromtimestamp((500000))],
["fireon", "cardasians", datetime.fromtimestamp((300000))]])
dataset.recordRow('One Zero', [["work", 1, datetime.fromtimestamp((300000))],
["sleep", 0, datetime.fromtimestamp((300000))]])
dataset.recordRow('Double', [["work", 1.5, datetime.fromtimestamp((300000))],
["sleep", 0.4, datetime.fromtimestamp((300000))]])
"""
# post the script for execution on the server
scriptConfig = {
"scriptSource": scriptSource
}
print MldbConnectorAdHoc(mldbUrl)._post("/v1/types/plugins/python/routes/run", "")(scriptConfig)
import json
queryConfig = {
"dataset": "tng",
"query": json.dumps({"head": "groupby", "body": [], "tail":["list_both"]})
}
print MldbConnectorAdHoc(mldbUrl)._post("/v1/plugins/pandas_rollup/routes/query", "")(queryConfig)
print queryConfig
# /v1/plugins/pandas_rollup/routes/query
# DFrame
|
[
"nicolas@datacratic.com"
] |
nicolas@datacratic.com
|
c10e69f9a1357a6a97db16ffe61328333a3c305f
|
5b1022e257e3825a2d4ddcd7fa071367bf1be073
|
/广铁新闻/IPPool.py
|
98f961ce3071371d98c290a958b47146a9bd310c
|
[] |
no_license
|
KwinnerChen/zkjw
|
465d36fb583ac474ce12ced831b890ed938767d6
|
9dd577e73144f51bde6fd1af9715cf162f32799a
|
refs/heads/master
| 2020-05-27T22:35:07.613419
| 2019-06-03T10:08:43
| 2019-06-03T10:08:43
| 188,807,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,135
|
py
|
#! usr/bin/env python3
# -*- coding: utf-8 -*-
# python: v3.6.4
from storage import Oracle
from queue import Queue
from threading import Thread
from time import sleep
from random import random
class IPPool(Queue):
'''
    Instantiating this class creates a self-refreshing queue of proxy IPs.
'''
def __init__(self, user, password, host, table_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = user
self.password = password
self.host = host
self.table_name = table_name
# self.delay_time = delay_time
t = Thread(target=self.__refresh)
t.start()
def __refresh(self):
while True:
if self.empty():
self.__put_proxy_queue()
else:
sleep(random()*2)
def __get_proxy_database(self):
ora = Oracle(self.user, self.password, self.host)
data = ora.getall(self.table_name)
ora.close()
return data
def __put_proxy_queue(self):
data = self.__get_proxy_database()
for ip, http in data:
self.put({http: http + '://' + ip})
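# A minimal usage sketch (connection values below are placeholders, not from
# the original module): the pool behaves like a Queue of proxy dicts.
if __name__ == '__main__':
    pool = IPPool('user', 'password', 'localhost:1521/orcl', 'proxy_table')
    proxy = pool.get()   # e.g. {'http': 'http://1.2.3.4'}
    print(proxy)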
|
[
"chenkai.c.k@hotmail.com"
] |
chenkai.c.k@hotmail.com
|
d66838e90d413055054c3233f6efc543b06dd338
|
4a191e5aecd53c4cea28482a0179539eeb6cd74b
|
/comments/migrations/0001_initial.py
|
ec8c8dbcf06824eb238de80f5a28b2174fec528e
|
[] |
no_license
|
jiangjingwei/blogproject
|
631a2e8e2f72420cce45ddaf152174852376d831
|
daf14e88092dc030a3ab0c295ee06fb6b2164372
|
refs/heads/master
| 2020-03-14T23:29:08.052253
| 2018-05-10T11:35:59
| 2018-05-10T11:35:59
| 131,846,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2018-05-04 02:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=255)),
('url', models.URLField(blank=True)),
('text', models.TextField()),
('create_time', models.DateTimeField(auto_now_add=True)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Article')),
],
),
]
|
[
"270159429@qq.com"
] |
270159429@qq.com
|
f0b8047aadd151aeaac9c8760e4f603bfda43cde
|
d697c1d45e96bd440be9c17ab14243a5882b1f52
|
/qianfeng/高级/测试/单元测试/myTest.py
|
44d63133e889f79192b9e8b1dae90e67d5ad0f8d
|
[] |
no_license
|
ithjl521/python
|
9eeda2e60dda97ee36e8764c06400eb12818689f
|
f4fe50799501c483cb64445fd05ee0f30f56576c
|
refs/heads/master
| 2020-07-12T23:10:53.608276
| 2019-11-08T08:59:35
| 2019-11-08T08:59:35
| 204,931,359
| 0
| 0
| null | null | null | null |
GB18030
|
Python
| false
| false
| 429
|
py
|
# coding=gbk
import unittest
from 对函数进行单元测试 import mySum
from 对函数进行单元测试 import mySub
class Test(unittest.TestCase):
    def setUp(self):
        print("Called automatically when a test starts")
    def tearDown(self):
        print("Called automatically when a test finishes")
    # Test mySum
    def test_mySum(self):
        self.assertEqual(mySum(1, 2), 3, "Addition result is wrong")
if __name__ == "__main__":
unittest.main()
|
[
"it_hjl@163.com"
] |
it_hjl@163.com
|
a9d1d757bf0eda6e44becfa0aa2e67aa7c86121a
|
a1517979b20286311bdac7cd153f698498e04223
|
/application/modules/tache/__init__.py
|
ee94819f39cb45b3b25e3ca5ba08fcdc39e07b59
|
[] |
no_license
|
wilrona/Gesacom_mongoDB
|
441367029b899ceb0304879fd808fb8dbdbfb457
|
d043136889c5f2c3e10ace8ebacf55c11b91b4c0
|
refs/heads/master
| 2020-12-25T14:23:39.343917
| 2017-07-07T16:38:26
| 2017-07-07T16:38:26
| 67,689,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
__author__ = 'Ronald'
from views_tache import *
app.register_blueprint(prefix_projet, url_prefix='/projet')
app.register_blueprint(prefix, url_prefix='/tache')
|
[
"wilrona@gmail.com"
] |
wilrona@gmail.com
|
aa9fd9f921e0e3d5e66dcd6281d0202037000305
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/leetcode/LeetcodePythonProject/leetcode_0701_0750/LeetCode728_SelfDividingNumbers.py
|
25a4ba8beb3af61a5e6a91825a9811b22e069d64
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 929
|
py
|
'''
Created on Mar 4, 2018
@author: tongq
'''
class Solution(object):
def selfDividingNumbers(self, left, right):
"""
:type left: int
:type right: int
:rtype: List[int]
"""
res = []
for num in range(left, right+1):
if self.isSelfDividing(num):
res.append(num)
return res
def isSelfDividing(self, num):
for digit in str(num):
d = int(digit)
if d == 0 or num%d != 0:
return False
return True
def test(self):
testCases = [
[1, 22],
]
for left, right in testCases:
print('left: %s' % left)
print('right: %s' % right)
result = self.selfDividingNumbers(left, right)
print('result: %s' % result)
print('-='*30+'-')
if __name__ == '__main__':
Solution().test()
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
1dd0de92808d94d40c70083b3d598435fd4edaad
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/algorithm/expert_algo/2_20.py
|
8e3be8d2091b9b978bf541ef81f355ccec8d2e40
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,917
|
py
|
Number of ways to reach the end of matrix with non-zero AND value
Given an **N * N** matrix **arr[][]** consisting of non-negative integers, the
task is to find the number of ways to reach **arr[N – 1][N – 1]** with a non-
zero AND value starting from the **arr[0][0]** by going down or right in every
move. Whenever a cell **arr[i][j]** is reached, ‘AND’ value is updated as
**currentVal & arr[i][j]**.
**Examples:**
> **Input:** arr[][] = {
> {1, 1, 1},
> {1, 1, 1},
> {1, 1, 1}}
>
> **Output:** 6
> All the paths give a non-zero AND value,
> so the number of ways equals 6.
>
> **Input:** arr[][] = {
> {1, 1, 2},
> {1, 2, 1},
> {2, 1, 1}}
> **Output:** 0
**Approach:** This problem can be solved using dynamic programming. First, we
need to decide the states of the DP. For every cell **arr[i][j]** and a number
**X** , we will store the number of ways to reach the **arr[N – 1][N – 1]**
from **arr[i][j]** with non-zero AND where **X** is the AND value of path till
now. Thus, our solution will use 3-dimensional dynamic programming, two for
the coordinates of the cells and one for **X**.
The required recurrence relation is:
> dp[i][j][X] = dp[i][j + 1][X & arr[i][j]] + dp[i + 1][j][X & arr[i][j]]
Below is the implementation of the above approach:
## C++
// C++ implementation of the approach
#include <bits/stdc++.h>
#define n 3
#define maxV 20
using namespace std;
// 3d array to store
// states of dp
int dp[n][n][maxV];
// Array to determine whether
// a state has been solved before
int v[n][n][maxV];
// Function to return the count of required paths
int countWays(int i, int j, int x, int arr[][n])
{
// Base cases
if (i == n || j == n)
return 0;
x = (x & arr[i][j]);
if (x == 0)
return 0;
if (i == n - 1 && j == n - 1)
return 1;
// If a state has been solved before
// it won't be evaluated again
if (v[i][j][x])
return dp[i][j][x];
v[i][j][x] = 1;
// Recurrence relation
dp[i][j][x] = countWays(i + 1, j, x, arr)
+ countWays(i, j + 1, x, arr);
return dp[i][j][x];
}
// Driver code
int main()
{
int arr[n][n] = { { 1, 2, 1 },
{ 1, 1, 0 },
{ 2, 1, 1 } };
cout << countWays(0, 0, arr[0][0], arr);
return 0;
}
## Java
// Java implementation of the approach
class GFG {
static int n = 3;
static int maxV = 20;
// 3d array to store
// states of dp
static int[][][] dp = new int[n][n][maxV];
// Array to determine whether
// a state has been solved before
static int[][][] v = new int[n][n][maxV];
// Function to return the count of required paths
static int countWays(int i, int j,
int x, int arr[][])
{
// Base cases
if (i == n || j == n) {
return 0;
}
x = (x & arr[i][j]);
if (x == 0) {
return 0;
}
if (i == n - 1 && j == n - 1) {
return 1;
}
// If a state has been solved before
// it won't be evaluated again
if (v[i][j][x] == 1) {
return dp[i][j][x];
}
v[i][j][x] = 1;
// Recurrence relation
dp[i][j][x] = countWays(i + 1, j, x, arr)
+ countWays(i, j + 1, x, arr);
return dp[i][j][x];
}
// Driver code
public static void main(String[] args)
{
int arr[][] = { { 1, 2, 1 },
{ 1, 1, 0 },
{ 2, 1, 1 } };
System.out.println(countWays(0, 0, arr[0][0], arr));
}
}
// This code contributed by Rajput-Ji
## Python3
# Python3 implementation of the approach
n = 3
maxV = 20
# 3d array to store states of dp
dp = [[[0 for i in range(maxV)]
for i in range(n)]
for i in range(n)]
# Array to determine whether
# a state has been solved before
v = [[[0 for i in range(maxV)]
for i in range(n)]
for i in range(n)]
# Function to return
# the count of required paths
def countWays(i, j, x, arr):
# Base cases
if (i == n or j == n):
return 0
x = (x & arr[i][j])
if (x == 0):
return 0
if (i == n - 1 and j == n - 1):
return 1
# If a state has been solved before
# it won't be evaluated again
if (v[i][j][x]):
return dp[i][j][x]
v[i][j][x] = 1
# Recurrence relation
dp[i][j][x] = countWays(i + 1, j, x, arr) + \
countWays(i, j + 1, x, arr);
return dp[i][j][x]
# Driver code
arr = [[1, 2, 1 ],
[1, 1, 0 ],
[2, 1, 1 ]]
print(countWays(0, 0, arr[0][0], arr))
# This code is contributed by Mohit Kumar
## C#
// C# implementation of the approach
using System;
class GFG
{
static int n = 3;
static int maxV = 20;
// 3d array to store
// states of dp
static int[,,] dp = new int[n, n, maxV];
// Array to determine whether
// a state has been solved before
static int[,,] v = new int[n, n, maxV];
// Function to return the count of required paths
static int countWays(int i, int j,
int x, int [,]arr)
{
// Base cases
if (i == n || j == n)
{
return 0;
}
x = (x & arr[i, j]);
if (x == 0)
{
return 0;
}
if (i == n - 1 && j == n - 1)
{
return 1;
}
// If a state has been solved before
// it won't be evaluated again
if (v[i, j, x] == 1)
{
return dp[i, j, x];
}
v[i, j, x] = 1;
// Recurrence relation
dp[i, j, x] = countWays(i + 1, j, x, arr)
+ countWays(i, j + 1, x, arr);
return dp[i, j, x];
}
// Driver code
public static void Main()
{
int [,]arr = { { 1, 2, 1 },
{ 1, 1, 0 },
{ 2, 1, 1 } };
Console.WriteLine(countWays(0, 0, arr[0,0], arr));
}
}
// This code is contributed by AnkitRai01
**Output:**
1
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
cee8539e090be64a5f7173b3a057803ae9d66c8f
|
055e5a9b64e53d0a87d07ce724fdb05eadc00a8e
|
/jmatcher/users/migrations/0019_merge_20170410_0521.py
|
7057179ad7b2519ed202fd18cd66ab435f2bc945
|
[
"MIT"
] |
permissive
|
jamesaud/se1-group4
|
73e73cfe6f56a1d1f1b256f13230a28f8d272f6c
|
5280b13dff33e72ce717318a8dd78a06cd6effb3
|
refs/heads/master
| 2021-01-09T05:41:11.686168
| 2017-04-27T17:13:17
| 2017-04-27T17:13:17
| 80,780,388
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-10 05:21
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0015_auto_20170409_1556'),
('users', '0018_user_short_description'),
]
operations = [
]
|
[
"jamaudre@indiana.edu"
] |
jamaudre@indiana.edu
|
5670c82a43a643f5d733543fb0552927320696bb
|
832f86e052d90916fb0c8156825c87dc13c0443e
|
/imported-from-gmail/2020-01-16-fix-brackets.py
|
05fd311f1316bc6c204495b94f6525133aaf14b7
|
[] |
no_license
|
johncornflake/dailyinterview
|
292615849cea62cb945ecc7039c594b6966a81f3
|
91bb0edb9e25255e6222279109c15ae9d203970c
|
refs/heads/master
| 2022-12-09T21:02:12.204755
| 2021-06-07T13:09:34
| 2021-06-07T13:09:34
| 225,059,833
| 0
| 0
| null | 2022-12-08T11:27:38
| 2019-11-30T19:24:58
|
Python
|
UTF-8
|
Python
| false
| false
| 611
|
py
|
Hi, here's your problem today. This problem was recently asked by Twitter:
Given a string with only '(' and ')', find the minimum number of characters to add or subtract to fix the string such that the brackets are balanced.
Example:
Input: '(()()'
Output: 1
Explanation:
The fixed string could either be '()()' by deleting the first bracket, or '(()())' by adding a bracket. These are not the only ways of fixing the string; there are many other ways by adding brackets in different positions!
Here's some code to start with:
def fix_brackets(s):
    # Fill this in.

print fix_brackets('(()()')
# 1
|
[
"chadeous@gmail.com"
] |
chadeous@gmail.com
|
e75dd8c6b2b4b77b7eb7778f390b6182c92f1b16
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03696/s991573247.py
|
2a4297734905538666e9564192e9f911fa2b72ac
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 826
|
py
|
# coding: utf-8
import sys
#from operator import itemgetter
sysread = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
#from heapq import heappop, heappush
#from collections import defaultdict
sys.setrecursionlimit(10**7)
#import math
#from itertools import product, accumulate, combinations, product
#import bisect
#import numpy as np
#from copy import deepcopy
#from collections import deque
#from decimal import Decimal
#from numba import jit
INF = 1 << 50
def run():
N = int(input())
S = input()
position = 0
l = 0
r = 0
for s in S:
if s == '(':
position += 1
else:
position -= 1
if position < 0:
l += abs(position)
position = 0
r += position
print(l * '(' + S + r * ')')
if __name__ == "__main__":
run()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
5ff59f2a7c0c5bfe42bfba11cc687c1a6f58470f
|
e1d6de1fb5ce02907df8fa4d4e17e61d98e8727d
|
/corpora/corpus_reader.py
|
87865c2db4af9e8d3a0af9f33d532c0e28e798b8
|
[] |
no_license
|
neuroph12/nlpy
|
3f3d1a8653a832d6230cb565428ee0c77ef7451d
|
095976d144dacf07414bf7ee42b811eaa67326c1
|
refs/heads/master
| 2020-09-16T08:24:37.381353
| 2016-09-10T19:24:05
| 2016-09-10T19:24:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
import nltk
from nltk.corpus import gutenberg
emma = gutenberg.words('austen-emma.txt')
print(len(emma))
emma = nltk.Text(gutenberg.words('austen-emma.txt'))
print(emma.concordance('surprize'))
# raw = gutenberg.raw("burgess-busterbrown.txt")
# print(raw[1:20])
# words
# words = gutenberg.words("burgess-busterbrown.txt")
# print(words[1:20])
# sents = gutenberg.sents("burgess-busterbrown.txt")
# print(sents[1:20])
for fileid in gutenberg.fileids():
num_chars = len(gutenberg.raw(fileid))
num_words = len(gutenberg.words(fileid))
num_sents = len(gutenberg.sents(fileid))
num_vocab = len(set(w.lower() for w in gutenberg.words(fileid)))
print(round(num_chars / num_words), round(num_words / num_sents), round(num_words / num_vocab), fileid)
|
[
"anderscui@gmail.com"
] |
anderscui@gmail.com
|
672f0214711e47b569a7596461b9befa37e15306
|
150d9e4cee92be00251625b7f9ff231cc8306e9f
|
/PathSumII.py
|
dfcf36fff64e9e02cdc4acebd71dc0fec6b03159
|
[] |
no_license
|
JerinPaulS/Python-Programs
|
0d3724ce277794be597104d9e8f8becb67282cb0
|
d0778178d89d39a93ddb9b95ca18706554eb7655
|
refs/heads/master
| 2022-05-12T02:18:12.599648
| 2022-04-20T18:02:15
| 2022-04-20T18:02:15
| 216,547,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,641
|
py
|
'''
Path Sum II
Given the root of a binary tree and an integer targetSum, return all root-to-leaf paths where each path's sum equals targetSum.
A leaf is a node with no children.
Example 1:
Input: root = [5,4,8,11,null,13,4,7,2,null,null,5,1], targetSum = 22
Output: [[5,4,11,2],[5,8,4,5]]
Example 2:
Input: root = [1,2,3], targetSum = 5
Output: []
Example 3:
Input: root = [1,2], targetSum = 0
Output: []
Constraints:
The number of nodes in the tree is in the range [0, 5000].
-1000 <= Node.val <= 1000
-1000 <= targetSum <= 1000
'''
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution(object):
def pathSum(self, root, targetSum):
"""
:type root: TreeNode
:type targetSum: int
:rtype: List[List[int]]
"""
result = []
current = []
def dfs(root, target, current):
if root is None:
return False
current.append(root.val)
if root.left is None and root.right is None:
if target == root.val:
result.append(current)
return True
dfs(root.left, target - root.val, current[:])
dfs(root.right, target - root.val, current[:])
return
dfs(root, targetSum, current)
return result
root = TreeNode(5)
root.left = TreeNode(4)
root.right = TreeNode(8)
root.left.left = TreeNode(11)
root.right.left = TreeNode(13)
root.left.left.left = TreeNode(7)
root.left.left.right = TreeNode(2)
root.right.right = TreeNode(4)
root.right.right.left = TreeNode(5)
root.right.right.right = TreeNode(1)
obj = Solution()
print(obj.pathSum(root, 22))
|
[
"jerinsprograms@gmail.com"
] |
jerinsprograms@gmail.com
|
f498651857a6a53c8e4ebfeb0f204f23da1d3690
|
5b683c7f0cc23b1a2b8927755f5831148f4f7e1c
|
/Python_Study/DataStructureAndAlgorithm/company_programming_test/度小满/duxiaoman_1.py
|
44a022a61f6419461c25e2f49b922a112256b254
|
[] |
no_license
|
Shmilyqjj/Shmily-py
|
970def5a53a77aa33b93404e18c57130f134772a
|
770fc26607ad3e05a4d7774a769bc742582c7b64
|
refs/heads/master
| 2023-09-02T04:43:39.192052
| 2023-08-31T03:28:39
| 2023-08-31T03:28:39
| 199,372,223
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
Description:
Author:jiajing_qu
Date: 2019/9/15 19:44
Crossing Obstacles
Time limit: C/C++ 1000 ms; other languages 3000 ms
Memory limit: C/C++ 65536 KB; other languages 589824 KB
Problem:
You start at (0, 0) and need to reach (x, y); there are n obstacles on the way.
Given the coordinates of each obstacle, and moving only parallel to the
coordinate axes in integer steps, find the minimum number of steps needed to
reach the destination.
Input
The first line contains three numbers: x, y, n.
Each of the next n lines gives the coordinates x_i, y_i of one obstacle.
-500 <= x, y, x_i, y_i <= 500
n <= 10000
A solution is guaranteed to exist.
Output
Print a single number: the minimum number of steps.
Sample input
2 0 3
1 0
1 1
1 -1
Sample output
6
"""
|
[
"710552907@qq.com"
] |
710552907@qq.com
|
9e417c4b3a95f7d673fce3c57bf7c5e06c2e5714
|
d6c117812a618ff34055488337aaffea8cf81ca1
|
/database/TupleSqlite3.py
|
27c1e232d6c79bc55a122c9de4b5b04e955b77e3
|
[] |
no_license
|
c0ns0le/Pythonista
|
44829969f28783b040dd90b46d08c36cc7a1f590
|
4caba2d48508eafa2477370923e96132947d7b24
|
refs/heads/master
| 2023-01-21T19:44:28.968799
| 2016-04-01T22:34:04
| 2016-04-01T22:34:04
| 55,368,932
| 3
| 0
| null | 2023-01-22T01:26:07
| 2016-04-03T21:04:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,880
|
py
|
# coding: utf-8
# https://forum.omz-software.com/topic/2375/problem-with-list-comprehension
from collections import namedtuple
import sqlite3
from random import randint
from faker import Faker
fake = Faker()
my_def = {'namedtuple_name': 'REC',
'field_names' :[('id' , 'INTEGER PRIMARY KEY'), ('resid','INTEGER UNIQUE'), ('key','TEXT') , ('ord','INTEGER'), ('value', 'INTEGER'), ('value1','TEXT'), ('data','TEXT'), ('pickled', 'INTEGER'),]
,}
'''
my_def = {'namedtuple_name': 'REC',
'field_names' :[('id' , 'INTEGER PRIMARY KEY'), ('resid','INTEGER UNIQUE'), ('key','TEXT') , ('ord','INTEGER'), ('data','TEXT'),]
,}
'''
MY_REC = my_def['namedtuple_name']
MY_REC = namedtuple(my_def['namedtuple_name'],[fld[0] for fld in my_def['field_names']])
MY_REC.__new__.__defaults__ = tuple((None for x in range(0,len(MY_REC._fields))))
mytbl_def = MY_REC._make(val[1] for val in my_def['field_names'])
_table_sql_new = '''CREATE TABLE IF NOT EXISTS '{0}' ({1})'''.format('{0}', ', '.join(mytbl_def._fields[i] + ' ' + item for i, item in enumerate(mytbl_def)) )
insert_pattern = '({0})'.format(','.join( c for c in str('?' * len(MY_REC._fields))))
_insert_sql = ''' INSERT INTO {0} VALUES ''' + insert_pattern
if __name__ == '__main__':
db_name = 'test.db'
db_table = 'table_c'
db_num_recs_to_add = 51
db = sqlite3.connect(db_name)
db.execute(_table_sql_new.format(db_table))
# using randint() for testing...resid is unquie
for i in range(1, db_num_recs_to_add):
r = MY_REC(resid = randint(1, 500000), key = fake.city(), data = fake.first_name())
db.execute(_insert_sql.format(db_table), [v for v in r])
db.commit()
cur = db.execute('SELECT * FROM {0}'.format(db_table))
for row in cur:
print repr(row)
db.close()
|
[
"itdamdouni@gmail.com"
] |
itdamdouni@gmail.com
|
84a5f8ae828e44cebcb306f6837843ac1f96503a
|
48b9d828acf80792bc4385febaa734a2e96ad465
|
/test-openmps/Examples/08_Heisenberg_spinone_iMPS.py
|
cb60a6f9b6a1ef388cf4d116e46e08b2104d414d
|
[
"MIT"
] |
permissive
|
OminiaVincit/qphase-trans
|
dd4ab2e0cacc449ead3bef318a65eb05aed45621
|
40e0c078dcd74282e8d8f44690433bf670bff8cb
|
refs/heads/master
| 2023-05-06T12:14:30.368375
| 2021-05-28T05:11:58
| 2021-05-28T05:11:58
| 235,478,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,663
|
py
|
import MPSPyLib as mps
import numpy as np
import sys
import os.path
def main(PostProcess=False):
# Build operators
Operators = mps.BuildSpinOperators(spin=1.0)
# Define Hamiltonian MPO
H = mps.MPO(Operators)
H.AddMPOTerm('bond', ['splus', 'sminus'], hparam='J_xy', weight=0.5)
H.AddMPOTerm('bond', ['sz','sz'], hparam='J_z', weight=1.0)
# Ground state observables
myObservables = mps.Observables(Operators)
# Site terms
myObservables.AddObservable('site', 'sz', 'z')
# correlation functions
myObservables.AddObservable('corr', ['sz', 'sz'], 'zz')
myObservables.AddObservable('corr', ['splus', 'sminus'], 'pm')
# Get correlation functions out to a distance of 1000
myObservables.SpecifyCorrelationRange(1000)
# Convergence parameters
myConv = mps.iMPSConvParam(max_bond_dimension=12, variance_tol=-1.0,
max_num_imps_iter=1000)
mod_list = ['max_bond_dimension','max_num_imps_iter']
myConv.AddModifiedConvergenceParameters(0, mod_list, [20, 500])
myConv.AddModifiedConvergenceParameters(0, mod_list, [40, 250])
# Long run time (Enable if you prefer)
#myConv.AddModifiedConvergenceParameters(0, mod_list, [60, 250])
#myConv.AddModifiedConvergenceParameters(0, mod_list, [80, 250])
L = 2
# Define statics
parameters = [{
# Directories
'job_ID' : 'Spin1.0Heisenberg',
'Write_Directory' : 'TMP_08/',
'Output_Directory' : 'OUTPUTS_08/',
# System size and Hamiltonian parameters
'L' : L,
'J_z' : 1.0,
'J_xy' : 1.0,
'simtype' : 'Infinite',
# Convergence parameters
'MPSObservables' : myObservables,
'MPSConvergenceParameters' : myConv,
'logfile' : True
}]
# Write Fortran-readable main files
MainFiles = mps.WriteFiles(parameters, Operators, H,
PostProcess=PostProcess)
# Run the simulations
if(not PostProcess):
if os.path.isfile('./Execute_MPSMain'):
RunDir = './'
else:
RunDir = None
mps.runMPS(MainFiles, RunDir=RunDir)
return
# Postprocessing and plotting
# ---------------------------
Outputs = mps.ReadStaticObservables(parameters)
clfilename = parameters[0]['job_ID'] + 'correlationLength.dat'
clfile = open(clfilename, 'w')
for Output in Outputs:
chi = Output['max_bond_dimension']
state = Output['state']
print('Chi', chi, 'state', state,
'energy density', Output['energy_density'])
if(state == 0):
corrfilename = parameters[0]['job_ID'] + 'chi' + str(chi) \
+ 'corr.dat'
corrfile = open(corrfilename, 'w')
for ii in range(0, myObservables.correlation_range):
corrfile.write('%16i'%(ii) + '%30.15E'%(Output['zz'][ii])
+ '%30.15E'%(Output['pm'][ii]) + '\n')
corrfile.close()
clfile.write('%16i'%(chi) + '%30.15E'%(Output['Correlation_length'])
+ '\n')
print(sum(Output['z']), Output['zz'][0:6])
clfile.close()
return
if(__name__ == '__main__'):
# Check for command line arguments
Post = False
for arg in sys.argv[1:]:
key, val = arg.split('=')
if(key == '--PostProcess'): Post = (val == 'T') or (val == 'True')
# Run main function
main(PostProcess=Post)
|
[
"k09tranhoan@gmail.com"
] |
k09tranhoan@gmail.com
|
53b08e77991e0b1c805ef8a3b886ebef4fe0b617
|
47a17b7b649e90ad0eedb270603193eb55703dba
|
/webapps/API/settings/requests.py
|
e8da027820d888797c2f58b52f88f9ddf45fa539
|
[] |
no_license
|
omiguelperez/kaumer-django-knowledge-test
|
25ff72f0e91e8ebeb542b80921b7083addf2cc3b
|
63da7943d00e37f4ee8edf6c7cc0cb46656deabf
|
refs/heads/master
| 2021-09-28T19:09:49.716506
| 2018-11-19T19:15:38
| 2018-11-19T19:15:38
| 157,953,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
# -*- coding: utf-8 -*-
from rest_framework import serializers
# noinspection PyAbstractClass
class CreateSettingSerializer(serializers.Serializer):
basic_salary = serializers.FloatField()
transport_assistance = serializers.FloatField()
holiday_percentage = serializers.FloatField()
unemployment_percentage = serializers.FloatField()
unemployment_interest = serializers.FloatField()
premium_services = serializers.FloatField()
health_percentage = serializers.FloatField()
pension_percentage = serializers.FloatField()
occupational_hazards = serializers.FloatField()
cash_contributions = serializers.FloatField()
|
[
"mr.omiguelperez@gmail.com"
] |
mr.omiguelperez@gmail.com
|
340a21ad7d528e64aeca54c6fcb9ac365b69db55
|
47c2b01b04ed3ea7c55875b5ea412d90becd970b
|
/tests/physical_system/test_routing.py
|
256b1994db12aaa6e854f0f9c384573f153dc669
|
[] |
no_license
|
adysonmaia/phd-sp-dynamic
|
f2df9bee38a0246f40739a8e413ec4cb832ab03f
|
ce7045918f60c92ce1ed5ca4389b969bf28e6b82
|
refs/heads/master
| 2023-04-03T20:10:32.593381
| 2020-12-28T11:12:06
| 2020-12-28T11:12:06
| 355,110,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,132
|
py
|
from sp.core.model import Scenario, System, EnvironmentInput
from sp.physical_system.routing.shortest_path import ShortestPathRouting
from sp.physical_system.estimator import DefaultLinkDelayEstimator
import json
import unittest
class RoutingTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
filename = "tests/physical_system/fixtures/test_routing.json"
system = None
with open(filename) as json_file:
data = json.load(json_file)
system = System()
system.scenario = Scenario.from_json(data)
cls.system = system
def setUp(self):
self.assertIsInstance(self.system, System)
self.assertEqual(len(self.system.nodes), 11)
self.assertEqual(len(self.system.bs_nodes), 9)
self.assertEqual(len(self.system.users), 1)
self.assertEqual(len(self.system.apps), 1)
self.assertEqual(self.system.apps[0].id, 0)
time = 0
self.system.time = time
self.environment = EnvironmentInput.create_empty(self.system)
def test_shortest_path(self):
app_id = 0
app = self.system.get_app(app_id)
routing = ShortestPathRouting()
routing.static_routing = True
routing.link_delay_estimator = DefaultLinkDelayEstimator()
routing.update(self.system, self.environment)
for link in self.system.links:
l_nodes = list(link.nodes_id)
path = routing.get_path(app.id, *l_nodes)
dist = routing.get_path_length(app.id, *l_nodes)
self.assertListEqual(l_nodes, path)
self.assertEqual(dist, 1.001)
for node in self.system.nodes:
path = routing.get_path(app.id, node.id, node.id)
dist = routing.get_path_length(app.id, node.id, node.id)
self.assertListEqual(path, [])
self.assertEqual(dist, 0.0)
path = routing.get_path(app.id, 0, 10)
dist = routing.get_path_length(app.id, 0, 10)
self.assertEqual(len(path), 7)
self.assertEqual(round(dist, 3), 6.006)
if __name__ == '__main__':
unittest.main()
|
[
"adyson.maia@gmail.com"
] |
adyson.maia@gmail.com
|
b74fb1a17ed2f7316dcf55fb45d1f45bf67c4f0f
|
8cbd55d35a179dff6a7c23a6835bcd329cba8bee
|
/simplekiq/base.py
|
6a751156867038799c739df70a90f232355e729c
|
[
"Apache-2.0"
] |
permissive
|
charsyam/simplekiq
|
31e3dac37981f48d80cbf8c8d921bebf8cf8a7cc
|
cd8b02078e06af64d79c5498af55fcdfbaf81676
|
refs/heads/main
| 2023-06-24T18:08:47.468046
| 2021-07-28T15:59:57
| 2021-07-28T15:59:57
| 380,718,347
| 0
| 0
|
Apache-2.0
| 2021-07-28T15:59:58
| 2021-06-27T11:06:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,413
|
py
|
from .constants import Constants
import json
import redis
class KiqQueue:
def __init__(self, addr, name, create=True):
if not name:
raise Exception("Queue name should be supported")
self.addr = addr
self.conn = self.connect_to_redis(addr)
self._name = name
self._queue_name = Constants.QUEUE_TPL.format(name)
if create:
self.conn.sadd(Constants.QUEUES_NAME, self._name)
def connect_to_redis(self, addr):
return redis.from_url(f"redis://{addr}/")
@property
def name(self):
return self._name
@property
def queue_name(self):
return self._queue_name
def enqueue(self, event):
try:
self.conn.rpush(self.queue_name, json.dumps(event))
return True
except redis.exceptions.ConnectionError as e:
self.conn = self.connect_to_redis(self.addr)
return False
def dequeue(self, wait=True):
try:
if wait:
v = self.conn.blpop(self.queue_name)[1]
else:
v = self.conn.lpop(self.queue_name)
if v:
return json.loads(v.decode('utf-8'))
else:
return None
except redis.exceptions.ConnectionError as e:
self.conn = self.connect_to_redis(self.addr)
return None
|
[
"charsyam@naver.com"
] |
charsyam@naver.com
|
7a05c0aaddc62ea69efb81d1cbd4b5c08f771f64
|
209c876b1e248fd67bd156a137d961a6610f93c7
|
/python/paddle/fluid/tests/unittests/test_increment.py
|
34c7af4ac081e315c1694b8ef917fc2f08febfa0
|
[
"Apache-2.0"
] |
permissive
|
Qengineering/Paddle
|
36e0dba37d29146ebef4fba869490ecedbf4294e
|
591456c69b76ee96d04b7d15dca6bb8080301f21
|
refs/heads/develop
| 2023-01-24T12:40:04.551345
| 2022-10-06T10:30:56
| 2022-10-06T10:30:56
| 544,837,444
| 0
| 0
|
Apache-2.0
| 2022-10-03T10:12:54
| 2022-10-03T10:12:54
| null |
UTF-8
|
Python
| false
| false
| 2,252
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
class TestIncrement(unittest.TestCase):
def test_api(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.layers.fill_constant(shape=[1],
dtype='int64',
value=5)
expected_result = np.array([8], dtype='int64')
output = paddle.tensor.math.increment(input, value=3)
exe = fluid.Executor(fluid.CPUPlace())
result = exe.run(fetch_list=[output])
self.assertEqual((result == expected_result).all(), True)
with fluid.dygraph.guard():
input = paddle.ones(shape=[1], dtype='int64')
expected_result = np.array([2], dtype='int64')
output = paddle.tensor.math.increment(input, value=1)
self.assertEqual((output.numpy() == expected_result).all(), True)
class TestInplaceApiWithDataTransform(unittest.TestCase):
def test_increment(self):
if fluid.core.is_compiled_with_cuda():
paddle.enable_static()
with paddle.fluid.device_guard("gpu:0"):
x = paddle.fluid.layers.fill_constant([1], "float32", 0)
with paddle.fluid.device_guard("cpu"):
x = paddle.increment(x)
exe = paddle.static.Executor(paddle.CUDAPlace(0))
a, = exe.run(paddle.static.default_main_program(), fetch_list=[x])
paddle.disable_static()
self.assertEqual(a[0], 1)
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
Qengineering.noreply@github.com
|
e05ca1e744cc58f496b63f7b2feb5c87157fb2bc
|
99d7a6448a15e7770e3b6f3859da043300097136
|
/src/mv/segment/region.py
|
b533e040ccccc4667f5e6e485620c1628d3b316d
|
[] |
no_license
|
softtrainee/arlab
|
125c5943f83b37bc7431ae985ac7b936e08a8fe4
|
b691b6be8214dcb56921c55daed4d009b0b62027
|
refs/heads/master
| 2020-12-31T07:54:48.447800
| 2013-05-06T02:49:12
| 2013-05-06T02:49:12
| 53,566,313
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,357
|
py
|
#===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#============= enthought library imports =======================
from traits.api import HasTraits, Bool
from traitsui.api import View, Item, TableEditor
#============= standard library imports ========================
from numpy import zeros_like, invert, percentile, ones_like
from skimage.filter import sobel, threshold_adaptive
from skimage.morphology import watershed
#============= local library imports ==========================
from src.mv.segment.base import BaseSegmenter
# from skimage.exposure.exposure import rescale_intensity
# from scipy.ndimage.morphology import binary_closing
cnt = 0
class RegionSegmenter(BaseSegmenter):
use_adaptive_threshold = Bool(True)
threshold_low = 0
threshold_high = 255
block_size = 20
def segment(self, src):
'''
src: preprocessing cv.Mat
'''
image = src.ndarray[:]
if self.use_adaptive_threshold:
# block_size = 25
markers = threshold_adaptive(image, self.block_size)
n = markers[:].astype('uint8')
n[markers == True] = 255
n[markers == False] = 1
markers = n
# print markers
# markers = markers.astype('uint8')
# n = ones_like(markers)
# n[markers] = 255
# print n
# markers[markers] = 255
# markers[not markers] = 1
# print markers
# markers = n.astype('uint8')
# markers = invert(markers).astype('uint8')
else:
markers = zeros_like(image)
markers[image < self.threshold_low] = 1
markers[image > self.threshold_high] = 255
# global cnt
# # remove holes
# if cnt % 2 == 0:
# markers = binary_closing(markers).astype('uint8') * 255
# cnt += 1
# print markers
elmap = sobel(image, mask=image)
wsrc = watershed(elmap, markers, mask=image)
return invert(wsrc)
# elmap = ndimage.distance_transform_edt(image)
# local_maxi = is_local_maximum(elmap, image,
# ones((3, 3))
# )
# markers = ndimage.label(local_maxi)[0]
# wsrc = watershed(-elmap, markers, mask=image)
# fwsrc = ndimage.binary_fill_holes(out)
# return wsrc
# if self.use_inverted_image:
# out = invert(wsrc)
# else:
# out = wsrc
# time.sleep(1)
# do_later(lambda:self.show_image(image, -elmap, out))
# return out
#============= EOF =============================================
|
[
"jirhiker@localhost"
] |
jirhiker@localhost
|
86ab1b6210f05d584469ce7ed92af41e66519780
|
451331db65a364d3b40eb18a1349e4349695dc87
|
/FinalFit/datacards/Systematics/PlotLimits.py
|
c8fe91a84d0e0ab8f2a48a5bef9b9d6d026159a5
|
[] |
no_license
|
hbakhshi/HaNaMiniAnalyzer
|
97bc5cdd72cd9651979184b4a1e50f6513100210
|
1d658c410318271b0c4981a5fba965924a15edff
|
refs/heads/master
| 2020-12-25T11:00:13.569640
| 2017-10-23T17:24:22
| 2017-10-23T17:24:22
| 60,080,249
| 0
| 1
| null | 2016-10-26T10:40:23
| 2016-05-31T10:28:27
|
Python
|
UTF-8
|
Python
| false
| false
| 5,304
|
py
|
from ROOT import TFile, TTree, TObject, TGraphAsymmErrors, TCanvas, kYellow, kBlack
import os
import stat
import array
import math
import shutil
from ROOT import RooWorkspace, TCanvas , RooFit, TColor, kBlue, kRed, kGreen, RooRealVar, RooConstVar, gROOT, TMath
from subprocess import call
import sys
AllNuisances = [ "CMS_hgg_nuisance_MaterialForward_13TeV",
"CMS_hgg_nuisance_ShowerShapeHighR9EB_13TeV",
"CMS_hgg_nuisance_MCSmearHighR9EEPhi_13TeV",
"CMS_hgg_nuisance_ShowerShapeHighR9EE_13TeV",
"CMS_hgg_nuisance_MCSmearHighR9EBPhi_13TeV",
"CMS_hgg_nuisance_ShowerShapeLowR9EE_13TeV",
"CMS_hgg_nuisance_MCScaleGain1EB_13TeV",
"CMS_hgg_nuisance_MaterialCentralBarrel_13TeV",
"CMS_hgg_nuisance_MCSmearLowR9EERho_13TeV",
"CMS_hgg_nuisance_MCSmearHighR9EBRho_13TeV",
"CMS_hgg_nuisance_MCScaleGain6EB_13TeV",
"CMS_hgg_nuisance_MCScaleLowR9EB_13TeV",
"CMS_hgg_nuisance_MCSmearLowR9EBRho_13TeV",
"CMS_hgg_nuisance_FNUFEB_13TeV",
"CMS_hgg_nuisance_FNUFEE_13TeV",
"CMS_hgg_nuisance_MCScaleLowR9EE_13TeV",
"CMS_hgg_nuisance_MCScaleHighR9EB_13TeV",
"CMS_hgg_nuisance_MaterialOuterBarrel_13TeV",
"CMS_hgg_nuisance_MCSmearLowR9EEPhi_13TeV",
"CMS_hgg_nuisance_MCScaleHighR9EE_13TeV",
"CMS_hgg_nuisance_MCSmearLowR9EBPhi_13TeV",
"CMS_hgg_nuisance_MCSmearHighR9EERho_13TeV",
"CMS_hgg_nuisance_ShowerShapeLowR9EB_13TeV"]
def GetLimits( syst_name ):
#path = "./SingleSystINWS11July/higgsCombine%s.Asymptotic.mH125.root" % (syst_name)
path = "./%s/higgsCombine%s.Asymptotic.mH125.root" % (sys.argv[1] , syst_name)
val = -100
val1sigmap = -100
val1sigmam = -100
val2sigmap = -100
val2sigmam = -100
if os.path.exists( path ) :
f = TFile.Open( path )
if f :
limit = f.Get("limit")
if not type(limit) == TTree :
val = -200
val1sigmap = -200
val1sigmam = -200
else :
for i in limit :
if i.quantileExpected == 0.5 :
val = i.limit
elif int(100*i.quantileExpected) in [15,16,17] :
val1sigmam = i.limit
elif int(100*i.quantileExpected) in [83,84,85] :
val1sigmap = i.limit
elif int(100*i.quantileExpected) in [2,3,4]:
val2sigmam = i.limit
elif int(100*i.quantileExpected) in [97,98,99]:
val2sigmap = i.limit
else :
print int(100*i.quantileExpected)
f.Close()
else :
val = -400
val1sigmap = -400
val1sigmam = -400
else:
print path
val = -300
val1sigmap = -300
val1sigmam = -300
if val <= 0 :
val /= 1000
print syst_name, val, val1sigmam, val1sigmap, val2sigmam, val2sigmap
return val, val1sigmam, val1sigmap, val2sigmam, val2sigmap
x = array.array('d')
y = array.array('d')
ex = array.array('d')
ey1sigmap = array.array('d')
ey1sigman = array.array('d')
ey2sigmap = array.array('d')
ey2sigman = array.array('d')
for syst in AllNuisances:
systName = syst.split("_")[3]
val, val1sigmam, val1sigmap, val2sigmam, val2sigmap = GetLimits( systName )
print AllNuisances.index( syst ) , syst
x.append( AllNuisances.index( syst ) )
y.append( val )
ex.append(0)
ey1sigmap.append( abs(val1sigmap-val) )
ey1sigman.append( abs(val1sigmam-val) )
ey2sigmap.append( abs(val2sigmap-val) )
ey2sigman.append( abs(val2sigmam-val) )
Bin = "Systematics"
canvas2 = TCanvas("sigma_bands")
graph_2sigma = TGraphAsymmErrors( len(x) , x , y , ex , ex , ey2sigman , ey2sigmap )
graph_2sigma.SetName( "GraphAsym_2SigmaBand_%s" % ( Bin ))
#graph_2sigma.SetTitle( Bin+ "(" +date+ ")" )
graph_2sigma.SetLineColor( kYellow-4)
graph_2sigma.SetFillColor( kYellow -4)
graph_2sigma.SetFillStyle( 1001 )
graph_2sigma.Draw( "a3" )
graph_1sigma = TGraphAsymmErrors( len(x) , x , y , ex , ex , ey1sigman , ey1sigmap )
graph_1sigma.SetName( "GraphAsym_1SigmaBand_%s" % (Bin ) )
#graph_1sigma.SetTitle( Bin + "(" +date+ ")" )
graph_1sigma.SetLineColor( kGreen - 4)
graph_1sigma.SetFillColor( kGreen -4)
graph_1sigma.SetFillStyle( 1001 )
graph_1sigma.Draw( "3 same" )
graph_1sigma.SetLineColor( kBlack )
graph_1sigma.SetLineWidth( 2 )
graph_1sigma.SetLineStyle( 2 )
graph_1sigma.SetMarkerColor( kBlack )
graph_1sigma.SetMarkerStyle( 0 )
graph_1sigma.Draw("lp X")
xax = graph_2sigma.GetXaxis()
pi = TMath.Pi()
i = 0
while i*pi/3 <= xax.GetXmax():
systName = AllNuisances[i].split("_")[3]
bin_index = xax.FindBin(i*pi/3)
xax.SetBinLabel(bin_index, systName )
i+=1
print i,bin_index,xax.GetBinCenter(bin_index), systName
canvas2.Modified()
canvas2.Update()
|
[
"hbakhshi@cern.ch"
] |
hbakhshi@cern.ch
|
bd33c7acbcad90997801a4d28eae63b1c866db4b
|
34c88cb508fe7ad10f258d220d645a60463a9063
|
/Misc/LUIGenerateGrid.py
|
b966b2efa47d986046a0f8a62a44e50754c24b0b
|
[] |
no_license
|
tzaeru/LUI
|
5a922f3f830a86c424831a6c5947a413c45f2bb8
|
14f59e24dc45d88f26214d6f51ed3565a7206374
|
refs/heads/master
| 2021-01-18T07:30:55.618553
| 2015-02-27T13:17:19
| 2015-02-27T14:08:39
| 31,418,557
| 0
| 0
| null | 2015-02-27T12:38:06
| 2015-02-27T12:38:05
| null |
UTF-8
|
Python
| false
| false
| 1,475
|
py
|
from os import makedirs
from os.path import dirname, join, isdir
from panda3d.core import *
# Configuration
# source = raw_input("Source png file: ")
# destPath = dirname(source)
# borderSize = int(raw_input("Border size in pixel: "))
source = "btn_green_focus.png"
destPath = "../Builtin/res/"
destName = "ButtonMagicFocus_#.png"
borderSize = 7
def extractSubImage(x, y, w, h, name):
print "Extracting sub image to",name
subPNM = PNMImage(w, h, 4)
subPNM.copySubImage(img, 0, 0, x, y, w, h)
subPNM.write(destPath + destName.replace("#", name))
img = PNMImage(source)
w, h = img.getReadXSize(), img.getReadYSize()
if not isdir(destPath):
makedirs(destPath)
# top left
extractSubImage(0, 0, borderSize, borderSize, "TL")
# top right
extractSubImage(w-borderSize, 0, borderSize, borderSize, "TR")
# bottom left
extractSubImage(0, h-borderSize, borderSize, borderSize, "BL")
# bottom right
extractSubImage(w-borderSize, h-borderSize, borderSize, borderSize, "BR")
# top
extractSubImage(borderSize, 0, w-2*borderSize, borderSize, "Top")
# bottom
extractSubImage(borderSize, h - borderSize, w-2*borderSize, borderSize, "Bottom")
# left
extractSubImage(0, borderSize, borderSize, h-2*borderSize, "Left")
# right
extractSubImage(w-borderSize, borderSize, borderSize, h-2*borderSize, "Right")
# mid
# extractSubImage(borderSize, borderSize, w-2*borderSize, h-2*borderSize, "Mid")
extractSubImage(borderSize, borderSize, 1, h-2*borderSize, "Mid")
|
[
"tobias.springer1@googlemail.com"
] |
tobias.springer1@googlemail.com
|
029bec16424b0a6a283b9075d4ce821c32a71078
|
9189218d0520ff06fecfa4193466e5662a1628ba
|
/road_trip/road_trip.py
|
19225229adf8a091068a100cfdb2f3c0a783b9d3
|
[] |
no_license
|
TStand90/code-eval
|
1167e4139a60fead5026ddccb3ba2ede7c8f8666
|
6cbc4eba0cea4d980648cc238c20a3fcbf942aa5
|
refs/heads/master
| 2021-01-17T13:08:00.461910
| 2016-07-08T22:53:14
| 2016-07-08T22:53:14
| 41,000,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
import sys
def main(file_arg):
with open(file_arg) as f:
for line in f:
location_list = line.strip().split(';')
location_list = [location.strip() for location in location_list if location]
distances_from_start = []
for each_location in location_list:
name, distance = each_location.split(',')
distances_from_start.append(int(distance))
distances_from_start = sorted(distances_from_start)
distances = []
distances.append(distances_from_start[0])
for i, distance in enumerate(distances_from_start[1:]):
distances.append(distances_from_start[i+1] - distances_from_start[i])
print(','.join([str(distance) for distance in distances]))
if __name__ == '__main__':
main(sys.argv[1])
|
[
"tstand90@gmail.com"
] |
tstand90@gmail.com
|
529b590d1c69e0ec006f3264aa3e99a1908178bd
|
d7a4701e18be0f38820f5c15d80099fda6385f9f
|
/code-festival-2018-quala/A.py
|
0169373e8ab1935cb62b53a90e895bcbd3510868
|
[] |
no_license
|
shiki7/Atcoder
|
979a6f0eeb65f3704ea20a949940a0d5e3434579
|
c215c02d3bfe1e9d68846095b1bd706bd4557dd0
|
refs/heads/master
| 2022-05-21T16:59:01.529489
| 2022-04-29T11:26:42
| 2022-04-29T11:26:42
| 201,536,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
a = int(input())
b = int(input())
c = int(input())
s = int(input())
if 0 <= s - (a + b + c) <= 3:
print('Yes')
else:
print('No')
|
[
"bitbite8@gmail.com"
] |
bitbite8@gmail.com
|
b691b9bbb53e18dfee434ea542d15b52b5b2b775
|
7c0820998f6ed2f1f5ee82b8b7ffd67c3228bfb6
|
/pytest_training/conftest.py
|
46ce2679f535f34c569f0dec3ccd74d57edb06d5
|
[] |
no_license
|
youinmelin/practice2020
|
5127241eaccf3ec997bb10671008a9a7c5f9d741
|
47d376b6d264141c229b6afcc2be803f41fd611e
|
refs/heads/master
| 2022-12-12T00:28:22.293373
| 2020-09-22T08:29:37
| 2020-09-22T08:29:37
| 237,427,204
| 0
| 0
| null | 2022-11-04T19:10:12
| 2020-01-31T12:38:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
import pytest
import tasks
from tasks import Task
@pytest.fixture()
def tasks_db(tmpdir):
'''prepare for the test. before the test, build the db envirment'''
tasks.start_tasks_db(str(tmpdir),'tiny')
yield
tasks.stop_tasks_db()
@pytest.fixture()
def tasks_just_a_few():
"""All summaries and owners are unique."""
return (
Task('Write some code', 'Brian', True),
Task("Code review Brian's code", 'Katie', False),
Task('Fix what Brian did', 'Michelle', False))
@pytest.fixture()
def tasks_mult_per_owner():
"""Several owners with several tasks each."""
return (
Task('Make a cookie', 'Raphael'),
Task('Use an emoji', 'Raphael'),
Task('Move to Berlin', 'Raphael'),
Task('Create', 'Michelle'),
Task('Inspire', 'Michelle'),
Task('Encourage', 'Michelle'),
Task('Do a handstand', 'Daniel'),
Task('Write some books', 'Daniel'),
Task('Eat ice cream', 'Daniel'))
@pytest.fixture()
def db_with_3_tasks(tasks_db, tasks_just_a_few):
"""Connected db with 3 tasks, all unique."""
for t in tasks_just_a_few:
tasks.add(t)
@pytest.fixture()
def db_with_multi_per_owner(tasks_db, tasks_mult_per_owner):
"""Connected db with 9 tasks, 3 owners, all with 3 tasks."""
for t in tasks_mult_per_owner:
tasks.add(t)
|
[
"ygqs@sina.com"
] |
ygqs@sina.com
|
e253f5dfd8597281db7b5940b3b852b6df8bf7f1
|
95fcab4fd10cbd6bd3194002a82aee1337b75e82
|
/crazy_decrypter
|
59f65442d9c133d0fc67f6120b3cf57a54e2a00a
|
[
"MIT"
] |
permissive
|
Python3pkg/Crazy-Decrypter
|
56b8490a3159fd1482a0cba212d8835518fd2537
|
ee1dc91cf633d38131ba60e1675f5293bb83a323
|
refs/heads/master
| 2021-01-21T17:22:24.733510
| 2016-05-21T07:14:32
| 2016-05-21T07:14:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,535
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time, string, sys
import itertools, hashlib
NAME = 'Python Crazy Decrypter'
VERSION = '0.0.4'
AUTHOR = 'Summon Agus'
DESCRIPTION = NAME + ''' is real crazy tool
to decrypt md5, sha1, sha224, sha256, sha384, and sha512 with Brute Force method.'''
CHRS = string.printable.replace(' \t\n\r\x0b\x0c', '')
DICT_MODULE_TYPES = {
'md5' : 32,
'sha1' : 40,
'sha224' : 56,
'sha256' : 64,
'sha384' : 96,
'sha512' : 128
}
def print_help():
print '\n'+NAME
print 'Author : {}'.format(AUTHOR)
print 'Version : {}\n'.format(VERSION)
print DESCRIPTION
print '''\nPARAMETERS:
-m \t To try with specific module choice.
-a \t To try with all modules.
-c \t To try with specific charachters.
-ac \t To try with all charachters. \n\nUSAGE:
SPECIFIC MODULE
$ crazy_decrypter -m <module_type> <hashed> -c <chars> <min_length> <max_length>
$ crazy_decrypter -m md5 d73d1e93a306b8230410cbe496ec84bf -c ABC 1 2
$ crazy_decrypter -m <module_type> <hashed> -ac <min_length> <max_length>
$ crazy_decrypter -m md5 d73d1e93a306b8230410cbe496ec84bf -ac 1 2
ALL MODULES
$ crazy_decrypter -a <hashed> -c <chars> <min_length> <max_length>
$ crazy_decrypter -a d73d1e93a306b8230410cbe496ec84bf -c ABC 1 2
$ crazy_decrypter -a <hashed> -ac <min_length> <max_length>
$ crazy_decrypter -a d73d1e93a306b8230410cbe496ec84bf -ac 1 2
'''
def decrypter(choice, module_type, hashed, chrs, min_length, max_length):
if module_type in DICT_MODULE_TYPES.keys():
improt_module = getattr(hashlib, '{}'.format(module_type))
else:
print '\n The `{}` does not exist in the list module!\n Please try this: {}\n'.format(module_type, DICT_MODULE_TYPES.keys())
sys.exit()
if min_length > max_length:
print '\n Min-length must be longer than Max-length or as same as with Max-length.\n'
sys.exit()
if len(hashed) not in DICT_MODULE_TYPES.values():
print "\n Provided hash doesn't match any of known hashes bitmap."
print " Correct length for hases type:"
for k, i in sorted(DICT_MODULE_TYPES.iteritems()):
print ' -', k,':',i
print ''
sys.exit()
if choice == '-m' and len(hashed) != DICT_MODULE_TYPES[module_type]:
print "\n The hash `{}` is doesn't exist in `{}`.\n Please try another type!\n".format(hashed, module_type)
sys.exit()
end_result_chip = ''
try:
for n in range(min_length, max_length+1):
for xs in itertools.product(chrs, repeat=n):
result_chip = ''.join(xs)
hash_chip = improt_module(result_chip).hexdigest()
if hashed == hash_chip:
end_result_chip += result_chip
print 'Decrypt found : {}'.format(end_result_chip)
print 'Type Decrypt : {}'.format(module_type)
print 'End time : {}\n'.format(time.strftime('%H:%M:%S'))
sys.exit()
else:
print ' *** Please drink your coffee first! ***'
print '\t{} {}\n'.format(NAME, VERSION)
print 'CTRL+C to Exit!'
print 'Charachters to try : {}'.format(chrs)
print 'Min-length : {}'.format(min_length)
print 'Max-length : {}'.format(max_length)
if choice == '-a':
print 'Type Decrypt found : {}'.format(module_type)
else:
print 'Type Decrypt now : {}'.format(module_type)
print 'Trying with : {} - {}'.format(result_chip, hash_chip)
time.sleep(0.01)
print("\033c")
except KeyboardInterrupt:
print 'Finished!\n'
sys.exit()
if end_result_chip == '':
print 'Not Found!'
print 'End time: {}\n'.format(time.strftime('%H:%M:%S'))
sys.exit()
else: pass
if __name__ == '__main__':
if len(sys.argv) == 1 or len(sys.argv) > 8: print_help()
elif sys.argv[1] == '-m':
try:
if sys.argv[4] == '-c':
decrypter(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[5], int(sys.argv[6]), int(sys.argv[7]))
elif sys.argv[4] == '-ac':
decrypter(sys.argv[1], sys.argv[2], sys.argv[3], CHRS, int(sys.argv[5]), int(sys.argv[6]))
else: print_help()
except IndexError: print_help()
elif sys.argv[1] == '-a':
try:
len_hases = len(sys.argv[2])
try:
module_type = DICT_MODULE_TYPES.keys()[DICT_MODULE_TYPES.values().index(len_hases)]
except ValueError:
print "\n Provided hash doesn't match any of known hashes bitmap."
print " Correct length for hases type:"
for k, i in sorted(DICT_MODULE_TYPES.iteritems()):
print ' -', k,':',i
print ''
sys.exit()
if sys.argv[3] == '-c':
decrypter(sys.argv[1], module_type, sys.argv[2], sys.argv[4], int(sys.argv[5]), int(sys.argv[6]))
elif sys.argv[3] == '-ac':
decrypter(sys.argv[1], module_type, sys.argv[2], CHRS, int(sys.argv[4]), int(sys.argv[5]))
else: print_help()
except IndexError: print_help()
else: print_help()
|
[
"summon.agus@gmail.com"
] |
summon.agus@gmail.com
|
|
2cd54f26168a16fdb61877804a0372677aa8d4ea
|
99f6c5b7a6b6840163b32d633e658678d5829b46
|
/practice/autumn/fibonacci.py
|
5020c32d0e53287a274b521a1ecefd3b96822b40
|
[] |
no_license
|
aliceayres/leetcode-practice
|
32f2695a567317013b567a68863f2c95c75b438b
|
0743cbeb0e9aa4a8a25f4520a1e3f92793fae1ee
|
refs/heads/master
| 2021-06-02T15:11:29.946006
| 2020-02-06T04:06:55
| 2020-02-06T04:06:55
| 131,126,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,261
|
py
|
'''
Fibonacci
'''
class Solution:
def solute(self,n):
return self.matrix(n)
def naive(self,n):
if n == 0:
return 0
if n == 1:
return 1
if n > 1:
return self.naive(n-1)+self.naive(n-2)
def bottom(self,n):
cache = []
for i in range(n+1):
if i == 0:
cache += [0]
elif i == 1:
cache += [1]
else:
cache += [cache[i-1] + cache[i-2]]
return cache[n]
def matrixmulti(self,a,b):
m = len(a)
n = len(b[0])
p = len(a[0])
matrix = [[] for i in range(m)]
for i in range(m):
for j in range(n):
sum = 0
for k in range(p):
sum += a[i][k]*b[k][j]
matrix[i].append(sum)
return matrix
def matrixpower(self,matrix,n):
power = matrix
for i in range(n-1):
power = self.matrixmulti(power,matrix)
return power
def matrix(self,n):
mt = [[1,1],[1,0]]
fb = self.matrixpower(mt,n-1)
return fb[0][0]
if __name__ == '__main__':
slt = Solution()
n = 10
fb = slt.solute(n)
print(fb)
|
[
"yeziqian@ctsig.com"
] |
yeziqian@ctsig.com
|
62812ccf590249b14771c4b7938a4c52c2551f53
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/network/tests/unit/plugins/modules/network/apconos/test_apconos_command.py
|
e7070abb121baad0615be7211de59725358f3b9d
|
[
"MIT",
"GPL-3.0-or-later",
"GPL-3.0-only"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 4,564
|
py
|
# (c) 2019 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible_collections.community.network.tests.unit.compat.mock import patch
from ansible_collections.community.network.plugins.modules.network.apconos import apconos_command
from ansible_collections.community.network.tests.unit.plugins.modules.utils import set_module_args
from .apconos_module import TestApconosModule, load_fixture
class TestApconosCommandModule(TestApconosModule):
module = apconos_command
def setUp(self):
super(TestApconosCommandModule, self).setUp()
self.mock_run_commands = patch('ansible_collections.community.network.plugins.modules.network.apconos.apconos_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestApconosCommandModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
filename = str(item).replace(' ', '_')
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_apcon_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout_lines']), 1)
self.assertEqual(result['stdout_lines'][0][0], 'APCON')
def test_apcon_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout_lines']), 2)
self.assertEqual(result['stdout_lines'][0][0], 'APCON')
self.assertEqual(result['stdout_lines'][1][0], 'APCON')
def test_apcon_command_wait_for(self):
wait_for = 'result[0] contains "APCON"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_apcon_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_apcon_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_apcon_command_match_any(self):
wait_for = ['result[0] contains "test string"',
'result[0] contains "VERSION"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_apcon_command_match_all(self):
wait_for = ['result[0] contains "COMPONENT"',
'result[0] contains "MODEL"',
'result[0] contains "VERSION"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_apcon_command_match_all_failure(self):
wait_for = ['result[0] contains "APCON OS"',
'result[0] contains "test string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
def test_apcon_command_checkmode_not_warning(self):
commands = ['enable ssh']
set_module_args(dict(commands=commands, _ansible_check_mode=False))
result = self.execute_module(changed=True)
self.assertEqual(result['warnings'], [])
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
739421ee81e9e6b444bf86baf71044687467e859
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sbss_1210+511/sdB_SBSS_1210+511_lc.py
|
c7a524db24e2e93df6d5e26eeb3fa943b51619ff
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[183.124667,50.900608], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_SBSS_1210+511 /sdB_SBSS_1210+511_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
302e910ed7e156d7359de27096dd03221bb48cb4
|
ac5ba4cc5f1636b1ef48927ea7a7d9c214f4789d
|
/CFEBBufferOverloadProducer/test/submit_NtupleProducer.py
|
02316b93e0570526faf429e4e1830ff39b4dfdce
|
[] |
no_license
|
sunilbansal/CSCPostLS2RateStudies
|
c5daa8841288bd7492efc30f779e9108c26b3b39
|
1411e6ea3e91242e6c4ef35163c71d50781f969f
|
refs/heads/master
| 2021-01-18T05:01:16.767500
| 2015-07-07T17:16:43
| 2015-07-07T17:16:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
'''
A simple script to easily submit all jobs for buffer rereco studies.
'''
import subprocess
BufferVersion = 'v7'
version = 'v2'
datasets = ['HZZ', 'ZZ', 'Zprime']
failureStrings = ['0p00', '0p01', '0p02', '0p05', '0p10', '0p20', '0p03', '0p50', '1p00']
failureModes = ["BOTH", "CFEB", "DDU"]
for dataset in datasets:
for failureString in failureStrings:
for failureMode in failureModes:
commandString = "farmoutAnalysisJobs $1 --input-file-list=inputs/inputs_BufferOverload_%s_%s_%s_%s.txt BufferOverload_%s_%s_%s_StandAlone_%s $CMSSW_BASE CSCPostLS2RateStudies/NtupleProducer/test/makeStandAloneNtuple_cfg.py 'outputFile=$outputFileName' 'inputFiles=$inputFileNames'" % (dataset, failureString, failureMode, BufferVersion, dataset, failureString, failureMode, version)
print commandString
subprocess.call(commandString,shell=True)
|
[
"dntaylor@wisc.edu"
] |
dntaylor@wisc.edu
|
dc8a8cd9cca1d12c7b9559429f70f1a2aa8a6dde
|
eaea9ca458ae4949e049743e6d712c3389dced00
|
/cesm_hist2tseries/__init__.py
|
6c0f8ee7181dbbd2399cd85ed26c937f2e76f9ea
|
[
"Apache-2.0"
] |
permissive
|
mnlevy1981/cesm-hist2tseries
|
1702fc1f192c140df3a70dd777892df46b48e435
|
c9bfcb5b16783bda1849f2d402897f0f6ef3b0c4
|
refs/heads/main
| 2023-04-14T04:35:36.554423
| 2021-05-04T15:54:49
| 2021-05-04T15:54:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
#!/usr/bin/env python3
# flake8: noqa
"""Top-level module for cesm-hist2tseries ."""
from pkg_resources import DistributionNotFound, get_distribution
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound: # pragma: no cover
# package is not installed
__version__ = 'unknown' # pragma: no cover
|
[
"axbanihirwe@ualr.edu"
] |
axbanihirwe@ualr.edu
|
fef950be4883a6c91c31bc909c9b76fe895df6f7
|
ccdeae68e468ad399a89181c37bba4490bcdc259
|
/scripts/40-genNonLinModelDistMatTopOnRLOnOther.py
|
5a155d8ec1e6b4f1987b13299f879ceea31190fd
|
[] |
no_license
|
jameshughes89/NonlinearModelsFMRI-2
|
19262d4494aa6adc0e9bd9592069ad6b757dda6b
|
a507a41d0a0a728d02616023aea0e66fafc1c387
|
refs/heads/master
| 2021-09-06T17:05:38.086733
| 2018-02-07T15:19:23
| 2018-02-07T15:19:23
| 109,417,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,822
|
py
|
'''
creates a large distance matrix for all non liear models.
CHANGE THE iFile line to switch between LR/RL --- Also be sure to save the output file names too!
'''
import csv
from math import *
import numpy as np
import matplotlib.pyplot as plt
import topExpressions_NL_RL_EMOTION as emotion
import topExpressions_NL_RL_GAMBLING as gambling
import topExpressions_NL_RL_LANGUAGE as language
import topExpressions_NL_RL_MOTOR as motor
import topExpressions_NL_RL_RELATIONAL as relational
import topExpressions_NL_RL_SOCIAL as social
import topExpressions_NL_RL_WM as wm
all_functions_line = emotion.getFuncs() + gambling.getFuncs() + language.getFuncs() + motor.getFuncs() + relational.getFuncs() + social.getFuncs() + wm.getFuncs()
tasks = ["EMOTION", "GAMBLING", "LANGUAGE", "MOTOR", "RELATIONAL", "SOCIAL", "WM"]
lasts = ["7", "2", "16", "21", "28", "3", "21"]
#subjects =[100307, 100408, 101006, 101107, 101309, 101410, 101915, 102008, 102311, 102816, 103111, 103414, 103515, 103818, 104012, 104820, 105014, 105115, 105216, 105923, 106016, 106319, 106521, 107321, 107422, 108121, 108323, 108525, 108828, 109123, 109325, 110411, 111312, 111413, 111514, 111716, 112819, 113215, 113619, 113821, 113922, 114419, 114924, 115320, 116524, 117122, 117324, 118528, 118730, 118932, 119833, 120111, 120212, 120515, 121315, 121618, 122317, 122620, 123117, 123420, 123925, 124220, 124422, 124826, 125525, 126325, 126628, 127630, 127933, 128127, 128632, 129028, 130013, 130316, 130922, 131217, 131722, 131924, 132118, 133019, 133625, 133827, 133928, 134324, 135225, 135528, 135932, 136227, 136833, 137027, 137128, 137633, 137936, 138231, 138534, 139233, 139637, 140117, 140824, 140925, 141422, 141826, 142424, 142626, 142828, 143325, 144226, 144832, 145531, 145834]
subjects =[100307, 100408, 101006, 101107, 101309, 101410, 101915, 102008, 102311, 102816,
103111, 103414, 103515, 103818, 104012, 104820, 105014, 105115, 105216, 105923,
106016, 106319, 106521, 107321, 107422, 108121, 108323, 108525, 108828, 109123,
109325, 110411, 111312, 111413, 111514, 111716, 113215, 113619, 113922, 114419]
matrixMSE = []
matrixABE = []
matrixMIN = []
lastsCount = 0
for t in tasks:
fs='funcsL_' + t + ' = ['
count = 0
for s in subjects:
print t, s
ALL = []
#iFile = csv.reader(open("/home/james/Desktop/nData/" + t + "_"+str(s)+"_2_L" + lasts[lastsCount] + "_Z.csv",'r'))
iFile = csv.reader(open("/home/james/Desktop/nData/" + t + "_"+str(s)+"_2_L" + lasts[lastsCount] + "_Z.csv",'r'))
for l in iFile:
ALL.append(l)
ALL = np.array(ALL)
ALL = ALL.astype(float)
allmsE = []
allabE = []
for f in all_functions_line:
try:
msE = []
abE = []
for l in ALL:
try:
err = l[-1] - f(l[0],l[1],l[2],l[3],l[4],l[5],l[6],l[7],l[8],l[9],l[10],l[11],l[12],l[13],l[14],l[15],l[16],l[17],l[18],l[19],l[20],l[21],l[22],l[23],l[24],l[25],l[26],l[27],l[28],l[29])
msE.append(err**2)
abE.append(abs(err))
except(ValueError, OverflowError, ZeroDivisionError):
msE.append(float('nan'))
abE.append(float('nan'))
allmsE.append((np.mean(msE)))
allabE.append((np.mean(abE)))
#allmsE.append(log(np.mean(msE)))
#allabE.append(log(np.mean(abE)))
except (ValueError, OverflowError, ZeroDivisionError):
print '\t\t\tBBBBBUSTTTEDDDD: ', t, s
allmsE.append(np.float('nan'))
allabE.append(np.float('nan'))
continue
matrixMSE.append(allmsE)
matrixABE.append(allabE)
allmin = np.zeros(len(allabE))
allmin[np.argsort(allabE)[0]] = 1
matrixMIN.append(allmin)
lastsCount +=1
np.savetxt('msEmat_NL_LR_topRL.csv', matrixMSE, delimiter=",")
np.savetxt('abEmat_NL_LR_topRL.csv', matrixABE, delimiter=",")
np.savetxt('minmat_NL_LR_topRL.csv', matrixMIN, delimiter=",")
|
[
"JamesHughes89@Gmail.com"
] |
JamesHughes89@Gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.