blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
88af47fd8f2bb9f4357a3ce833c9215cbd29d941 | fa346a2d5886420e22707a7be03599e634b230a9 | /temboo/Library/Amazon/S3/GetBucketLocation.py | a1f23c406555210b7f451832e408e86e7fd8a9b2 | [] | no_license | elihuvillaraus/entity-resolution | cebf937499ed270c3436b1dd25ab4aef687adc11 | 71dd49118a6e11b236861289dcf36436d31f06bc | refs/heads/master | 2021-12-02T17:29:11.864065 | 2014-01-08T04:29:30 | 2014-01-08T04:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,649 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# GetBucketLocation
# Returns the Region where the bucket is stored.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetBucketLocation(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetBucketLocation Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        # The path string identifies this Choreo within the Temboo library.
        Choreography.__init__(self, temboo_session, '/Library/Amazon/S3/GetBucketLocation')

    def new_input_set(self):
        # Factory for the typed input container used to supply parameters.
        return GetBucketLocationInputSet()

    def _make_result_set(self, result, path):
        # Internal factory: wrap a raw execution result in the typed ResultSet.
        return GetBucketLocationResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Internal factory: wrap a running execution handle.
        return GetBucketLocationChoreographyExecution(session, exec_id, path)
class GetBucketLocationInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetBucketLocation
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter below stores its value under the Choreo's input name via
    # the base-class _set_input hook.

    def set_AWSAccessKeyId(self, value):
        """
        Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
        """
        InputSet._set_input(self, 'AWSAccessKeyId', value)

    def set_AWSSecretKeyId(self, value):
        """
        Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
        """
        InputSet._set_input(self, 'AWSSecretKeyId', value)

    def set_BucketName(self, value):
        """
        Set the value of the BucketName input for this Choreo. ((required, string) The name of the bucket associated with the location you want to retrieve.)
        """
        InputSet._set_input(self, 'BucketName', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
        """
        InputSet._set_input(self, 'ResponseFormat', value)
class GetBucketLocationResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetBucketLocation Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # Parse a raw JSON string into Python objects.
        # NOTE(review): the parameter name shadows the builtin str() inside
        # this method; left as-is since renaming would change the signature.
        return json.loads(str)

    def get_LocationConstraint(self):
        """
        Retrieve the value for the "LocationConstraint" output from this Choreo execution. ((string) The Region returned by the choreo. Valid values: blank (Default US Classic Region AKA us-east-1), EU (AKA eu-west-1), us-west-1, us-west-2, ap-southeast-1, ap-southeast-2, ap-northeast-1, sa-east-1.)
        """
        return self._output.get('LocationConstraint', None)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)
        """
        return self._output.get('Response', None)
class GetBucketLocationChoreographyExecution(ChoreographyExecution):
    # Execution handle for an in-flight GetBucketLocation run; produces the
    # matching typed ResultSet when results are fetched.
    def _make_result_set(self, response, path):
        return GetBucketLocationResultSet(response, path)
| [
"cedric.warny@gmail.com"
] | cedric.warny@gmail.com |
2e7a19ed77da94d4c570f8512c0d330cd72973e2 | c2ae65792af1fab2e7843303ef90790819f872e8 | /Algorithm/BinaryTree/Construct-Binary-Tree-from-Inorder-and-Postorder-Traversal.py | cb21bbd260364c5cdc8da1af67095bbd9ee8d995 | [] | no_license | behappyyoung/PythonSampleCodes | 47c224ca76ce509a03c8b75ef6b4bf7f49ebdd7f | f7640467273fa8ea3c7e443e798737ca5bcea6f9 | refs/heads/master | 2023-03-15T00:53:21.034605 | 2023-02-13T17:12:32 | 2023-02-13T17:12:32 | 26,919,763 | 3 | 3 | null | 2023-03-07T12:45:21 | 2014-11-20T15:57:16 | Python | UTF-8 | Python | false | false | 851 | py | """
Given inorder and postorder traversal of a tree, construct the binary tree.
Note:
You may assume that duplicates do not exist in the tree.
inorder = [9,3,15,20,7]
postorder = [9,15,7,20,3]
3
/ \
9 20
/ \
15 7
"""
from _BinaryTree import TreeNode, print_btree
def buildTree(inorder, postorder):
    """Reconstruct a binary tree from its inorder and postorder traversals.

    Assumes the tree contains no duplicate values (per the problem
    statement above). Returns the root TreeNode, or None for empty
    traversals.

    Fix over the pop()-based version: the caller's ``postorder`` list is
    no longer mutated (the original emptied it as a side effect).
    """
    if not inorder or not postorder:
        return None
    # The last postorder entry is always the root of the current subtree.
    root = TreeNode(postorder[-1])
    i = inorder.index(root.val)  # split point: the left subtree has i nodes
    # Left subtree: the first i values of both traversals.
    root.left = buildTree(inorder[:i], postorder[:i])
    # Right subtree: inorder entries after the root; postorder entries
    # between the left subtree and the root itself.
    root.right = buildTree(inorder[i + 1:], postorder[i:-1])
    return root
h = buildTree([9,3,15,20,7], [9,15,7,20,3])
print_btree(h)
h = buildTree([1,9,2,3,15,20,7], [1,2,9,15,7,20,3])
print_btree(h) | [
"behappyyoung@gmail.com"
] | behappyyoung@gmail.com |
c6c8384d2cd0660325972a4383b80d1d5322e456 | bf3379daa827d8141aa56f9e8f281312e7439707 | /sect/triangulation.py | 6f91ba3ed63ae682f333b55464618e555128d898 | [
"MIT"
] | permissive | lycantropos/sect | 7e8ff0010104150e98f07c97201cca7c8f3ec512 | e362cd9518def706822e785e1aa56509605ce567 | refs/heads/master | 2023-06-08T09:32:12.029217 | 2023-05-30T23:05:33 | 2023-05-30T23:05:33 | 249,774,307 | 27 | 1 | MIT | 2020-09-25T04:19:27 | 2020-03-24T17:32:30 | Python | UTF-8 | Python | false | false | 185 | py | from .core.delaunay.quad_edge import QuadEdge as _QuadEdge
from .core.delaunay.triangulation import Triangulation as _Triangulation
# Re-export the internal implementations under the package's public names.
QuadEdge = _QuadEdge
Triangulation = _Triangulation
| [
"azatibrakov@gmail.com"
] | azatibrakov@gmail.com |
4a7df159a5e005fd017b018dd15e18b8c4d0f936 | bd69d00be499d9974a1f63612fcae5dccf3b0da7 | /tests/components/cover/test_command_line.py | bab0137f4f8a93c73a73e21925cddcc83ee0876e | [
"MIT"
] | permissive | casimec/home-assistant | 0d7d48cbf3852f4574e38eef22c3c3cc18db0245 | 705b3571f45cb36ce7b9899e6e1393370f3f90ee | refs/heads/dev | 2021-01-21T20:22:21.866130 | 2016-08-31T16:12:34 | 2016-08-31T16:12:34 | 67,054,337 | 1 | 0 | null | 2016-08-31T16:22:03 | 2016-08-31T16:22:03 | null | UTF-8 | Python | false | false | 2,996 | py | """The tests the cover command line platform."""
import os
import tempfile
import unittest
from unittest import mock
import homeassistant.core as ha
import homeassistant.components.cover as cover
from homeassistant.components.cover import (
command_line as cmd_rs)
class TestCommandCover(unittest.TestCase):
    """Test the cover command line platform."""

    def setup_method(self, method):
        """Setup things to be run when tests are started."""
        self.hass = ha.HomeAssistant()
        self.hass.config.latitude = 32.87336
        self.hass.config.longitude = 117.22743
        # Command strings are placeholders; these unit tests never execute them.
        self.rs = cmd_rs.CommandCover(self.hass, 'foo',
                                      'cmd_open', 'cmd_close',
                                      'cmd_stop', 'cmd_state',
                                      None)  # FIXME

    def teardown_method(self, method):
        """Stop down everything that was started."""
        self.hass.stop()

    def test_should_poll(self):
        """Test the setting of polling."""
        # Polling is only expected while a state command is configured.
        self.assertTrue(self.rs.should_poll)
        self.rs._command_state = None
        self.assertFalse(self.rs.should_poll)

    def test_query_state_value(self):
        """Test with state value."""
        with mock.patch('subprocess.check_output') as mock_run:
            mock_run.return_value = b' foo bar '
            result = self.rs._query_state_value('runme')
            # Expect the raw command output decoded and stripped of
            # surrounding whitespace.
            self.assertEqual('foo bar', result)
            mock_run.assert_called_once_with('runme', shell=True)

    def test_state_value(self):
        """Test with state value."""
        with tempfile.TemporaryDirectory() as tempdirname:
            # The cover's state lives in a plain file so the shell commands
            # below can read and write it.
            path = os.path.join(tempdirname, 'cover_status')
            test_cover = {
                'statecmd': 'cat {}'.format(path),
                'opencmd': 'echo 1 > {}'.format(path),
                'closecmd': 'echo 1 > {}'.format(path),
                'stopcmd': 'echo 0 > {}'.format(path),
                'value_template': '{{ value }}'
            }
            self.assertTrue(cover.setup(self.hass, {
                'cover': {
                    'platform': 'command_line',
                    'covers': {
                        'test': test_cover
                    }
                }
            }))

            # No state file exists yet, so the initial state is unknown.
            state = self.hass.states.get('cover.test')
            self.assertEqual('unknown', state.state)

            cover.open_cover(self.hass, 'cover.test')
            self.hass.pool.block_till_done()
            state = self.hass.states.get('cover.test')
            self.assertEqual('open', state.state)

            # NOTE(review): closecmd writes "1" just like opencmd, so the
            # state remains 'open' here — confirm this is intentional.
            cover.close_cover(self.hass, 'cover.test')
            self.hass.pool.block_till_done()
            state = self.hass.states.get('cover.test')
            self.assertEqual('open', state.state)

            cover.stop_cover(self.hass, 'cover.test')
            self.hass.pool.block_till_done()
            state = self.hass.states.get('cover.test')
            self.assertEqual('closed', state.state)
| [
"paulus@paulusschoutsen.nl"
] | paulus@paulusschoutsen.nl |
c46f668e8d39ef46d0d285c3adb60a497e03827d | 4cc143f0fdafd566f414460c9f2cb105f5892842 | /AAAI_Workshop/M2_BPS/demo_balanced_scheduling.py | 7ba717cbeda698fe62a12b0ba3768cf3854cbb8f | [
"MIT"
] | permissive | littlemoon13/SUOD | 562868a61fa769871622d71df9e8109ad57ac0dc | 5f5b0f4bc011b13c5e304320cfa7079649e56f16 | refs/heads/master | 2020-12-12T01:38:58.013278 | 2020-01-15T00:29:51 | 2020-01-15T00:29:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,537 | py | import os
import sys
import time
import warnings
import numpy as np
import scipy as sp
from scipy.stats import rankdata
from sklearn.preprocessing import StandardScaler
from joblib import effective_n_jobs
from joblib import Parallel, delayed
from copy import deepcopy
from joblib import load
from pyod.models.iforest import IForest
from pyod.models.abod import ABOD
from pyod.models.feature_bagging import FeatureBagging
from pyod.models.lof import LOF
from pyod.models.cblof import CBLOF
from pyod.models.ocsvm import OCSVM
from pyod.models.pca import PCA
from pyod.models.knn import KNN
from pyod.models.hbos import HBOS
from pyod.models.mcd import MCD
from pyod.models.lscp import LSCP
if not sys.warnoptions:
warnings.simplefilter("ignore")
def indices_to_one_hot(data, nb_classes):
    """Return one-hot encoded rows (one per entry) for the given class indices."""
    identity = np.eye(nb_classes)
    flat_targets = np.array(data).reshape(-1)
    return identity[flat_targets]
def _parallel_build_estimators(n_estimators, clfs, X, total_n_estimators,
verbose):
# Build estimators
estimators = []
for i in range(n_estimators):
estimator = deepcopy(clfs[i])
if verbose > 1:
print("Building estimator %d of %d for this parallel run "
"(total %d)..." % (i + 1, n_estimators, total_n_estimators))
estimator.fit(X)
estimators.append(estimator)
return estimators
def _partition_estimators(n_estimators, n_jobs):
    """Private function used to partition estimators between jobs.

    Returns ``(n_jobs, counts, starts)`` where ``counts`` holds the number
    of estimators per job and ``starts`` has ``n_jobs + 1`` offsets
    bracketing each job's slice of the estimator list.
    """
    # Never spawn more jobs than there are estimators to build.
    n_jobs = min(effective_n_jobs(n_jobs), n_estimators)

    # Even split, spreading the remainder one-per-job over the first jobs.
    # FIX: np.int was a deprecated alias of the builtin int and was removed
    # in NumPy 1.24; using int keeps the identical integer dtype.
    n_estimators_per_job = np.full(n_jobs, n_estimators // n_jobs,
                                   dtype=int)
    n_estimators_per_job[:n_estimators % n_jobs] += 1
    starts = np.cumsum(n_estimators_per_job)

    return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
##############################################################################
n_jobs = 5
n_estimators_total = 1000
mat_file = 'cardio.mat'
mat_file_name = mat_file.replace('.mat', '')
print("\n... Processing", mat_file_name, '...')
mat = sp.io.loadmat(os.path.join('../datasets', mat_file))
X = mat['X']
y = mat['y']
X = StandardScaler().fit_transform(X)
# load the pre-trained model cost predictor
clf = load('rf_predictor.joblib')
classifiers = {
1: ABOD(n_neighbors=10),
2: CBLOF(check_estimator=False),
3: FeatureBagging(LOF()),
4: HBOS(),
5: IForest(),
6: KNN(),
7: KNN(method='mean'),
8: LOF(),
9: MCD(),
10: OCSVM(),
11: PCA(),
}
clfs = np.random.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
size=n_estimators_total)
clfs_real = []
for estimator in clfs:
clfs_real.append(classifiers[estimator])
X_w = indices_to_one_hot(clfs - 1, 11)
X_d1 = np.array([X.shape[0], X.shape[1]]).reshape(1, 2)
X_d = np.repeat(X_d1, len(clfs), axis=0)
X_c = np.concatenate((X_d, X_w), axis=1)
predict_time = clf.predict(X_c)
# Conduct Balanced Task Scheduling: split the estimator list into n_jobs
# contiguous chunks whose predicted-cost ranks sum as evenly as possible.
n_estimators_list = []
ranks = rankdata(predict_time)
##########################################
# Shift/scale the ranks (originally 1..len) into roughly (1, 2] so no
# estimator carries a near-zero weight in the chunking below.
ranks = 1 + ranks/len(clfs)
##########################################
rank_sum = np.sum(ranks)
chunk_sum = rank_sum / n_jobs  # target rank mass per worker
starts = [0]
index_track = 0
sum_check = []
# Greedily extend the current chunk until its rank mass reaches the target,
# then open the next chunk at index i.
for i in range(len(ranks) + 1):
    if np.sum(ranks[starts[index_track]:i]) >= chunk_sum:
        starts.append(i)
        index_track += 1
starts.append(len(ranks))
for j in range(n_jobs):
    sum_check.append(np.sum(ranks[starts[j]:starts[j + 1]]))
    print('Worker', j+1, 'sum of ranks:', sum_check[j])
    n_estimators_list.append(starts[j + 1] - starts[j])
print()

# Confirm the length of the estimators is consistent
assert (np.sum(n_estimators_list) == n_estimators_total)
assert (np.abs(rank_sum - np.sum(sum_check)) < 0.1 )
n_jobs = min(effective_n_jobs(n_jobs), n_estimators_total)
total_n_estimators = sum(n_estimators_list)
xdiff = [starts[n] - starts[n - 1] for n in range(1, len(starts))]
print(starts, xdiff)
start = time.time()
# https://github.com/joblib/joblib/issues/806
# max_nbytes can be dropped on other OS
all_results = Parallel(n_jobs=n_jobs, max_nbytes=None, verbose=True)(
delayed(_parallel_build_estimators)(
n_estimators_list[i],
clfs_real[starts[i]:starts[i + 1]],
X,
total_n_estimators,
verbose=True)
for i in range(n_jobs))
print('Balanced Scheduling Total Time:', time.time() - start)
#############################################################################
print()
clfs = np.random.choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
size=n_estimators_total)
clfs = np.sort(clfs)
clfs_real = []
for estimator in clfs:
clfs_real.append(classifiers[estimator])
n_jobs, n_estimators, starts = _partition_estimators(len(clfs), n_jobs=n_jobs)
total_n_estimators = sum(n_estimators)
xdiff = [starts[n] - starts[n - 1] for n in range(1, len(starts))]
print(starts, xdiff)
start = time.time()
# https://github.com/joblib/joblib/issues/806
# max_nbytes can be dropped on other OS
all_results = Parallel(n_jobs=n_jobs, max_nbytes=None, verbose=True)(
delayed(_parallel_build_estimators)(
n_estimators[i],
clfs_real[starts[i]:starts[i + 1]],
X,
total_n_estimators,
verbose=True)
for i in range(n_jobs))
print('Naive Split Total Time', time.time() - start)
print(mat_file_name, n_jobs, n_estimators)
| [
"yzhao062@gmail.com"
] | yzhao062@gmail.com |
e95632da1adefee06e982ddc97cbf509f70f9629 | 22c5aee2ac42532ec143bf716001f7615cd49bfa | /docs/source/conf.py | 9b9a82f493b4b7b7bef8b8641e87bd69c6fb231f | [
"BSD-3-Clause"
] | permissive | thelabnyc/django-activity-stream | 2cf1cb463444ceae81c187e99c1b5b7f3fc89a96 | d464e891349b7917069480ba97b5854e89f72e8c | refs/heads/master | 2021-01-17T06:37:20.282926 | 2020-04-08T20:39:19 | 2020-04-08T20:39:19 | 41,499,647 | 1 | 1 | BSD-3-Clause | 2020-04-08T20:39:21 | 2015-08-27T16:54:58 | Python | UTF-8 | Python | false | false | 5,683 | py | # -*- coding: utf-8 -*-
#
# Django Activity Stream documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 1 12:35:29 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import datetime
os.environ['DJANGO_SETTINGS_MODULE'] = 'actstream.runtests.settings'
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../actstream/runtests'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))
import django
try:
django.setup()
except AttributeError:
pass
import actstream
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Django Activity Stream'
copyright = 'Django Activity Stream 2010-%s by Justin Quick. <br/> Activity Streams logo released under <a href="http://creativecommons.org/licenses/by/3.0/">Creative Commons 3.0</a>' % datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = actstream.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import alabaster
extensions.append('alabaster')
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html', 'donate.html',
]
}
html_static_path = ['_static']
html_theme_options = {
'logo': 'logo.jpg',
'logo_text_align': 'center',
'description': 'Generic activity streams for Django',
'github_user': 'justquick',
'github_repo': 'django-activity-stream',
'travis_button': True,
'gittip_user': 'justquick',
'analytics_id': 'UA-42089198-1'
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoActivityStreamdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoActivityStream.tex', u'Django Activity Stream Documentation',
u'Justin Quick', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangoactivitystream', u'Django Activity Stream Documentation',
[u'Justin Quick'], 1)
]
| [
"justquick@gmail.com"
] | justquick@gmail.com |
2606f5c477ba26d19aa5a7ea9655a7deb71dad0d | b87ea98bc166cade5c78d246aeb0e23c59183d56 | /samples/openapi3/client/petstore/python/petstore_api/model/array_of_enums.py | 1be72e32c39a15ed32c9747a09ed2ad02acf1db6 | [
"Apache-2.0"
] | permissive | holisticon/openapi-generator | 88f8e6a3d7bc059c8f56563c87f6d473694d94e5 | 6a67551ea54a1aa9a49eb48ee26b4e9bb7fb1272 | refs/heads/master | 2023-05-12T02:55:19.037397 | 2023-04-14T08:31:59 | 2023-04-14T08:31:59 | 450,034,139 | 1 | 0 | Apache-2.0 | 2022-01-20T09:34:14 | 2022-01-20T09:34:13 | null | UTF-8 | Python | false | false | 1,457 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from petstore_api import schemas # noqa: F401
class ArrayOfEnums(
    schemas.ListSchema
):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """


    class MetaOapg:
        # Schema metadata: each list element validates as a StringEnum.

        @staticmethod
        def items() -> typing.Type['StringEnum']:
            # StringEnum is imported at the bottom of this module, so it is
            # resolved lazily here rather than at class-definition time.
            return StringEnum

    def __new__(
        cls,
        _arg: typing.Union[typing.Tuple['StringEnum'], typing.List['StringEnum']],
        _configuration: typing.Optional[schemas.Configuration] = None,
    ) -> 'ArrayOfEnums':
        # Validating constructor: delegates to the parent ListSchema using
        # the MetaOapg description above.
        return super().__new__(
            cls,
            _arg,
            _configuration=_configuration,
        )

    def __getitem__(self, i: int) -> 'StringEnum':
        return super().__getitem__(i)
from petstore_api.model.string_enum import StringEnum
| [
"noreply@github.com"
] | holisticon.noreply@github.com |
4f2e5926c098f182d3846624332d1614d10362d7 | 7b9527f6a66bf544071c07498163883ae33ff9ec | /python/11055.py | b9c735b13ce5f3b4c7e5a2d8d7d9dab189b6ae39 | [] | no_license | rhyun9584/BOJ | ec4133718934e59689cdcc0d3284bad9a412dc7a | f4c651da7c4840595175abf201d07151d4ac9402 | refs/heads/master | 2023-08-31T21:29:07.550395 | 2023-08-25T16:53:53 | 2023-08-25T16:53:53 | 225,122,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | import copy
# BOJ 11055: maximum sum of an increasing subsequence (O(N^2) DP).
N = int(input())                       # number of elements
A = [int(x) for x in input().split()]  # the sequence
# sum[i] = best increasing-subsequence sum ending exactly at A[i]; each
# element alone is a valid subsequence, so start from a copy of A.
# NOTE(review): the name shadows the builtin sum().
sum = copy.deepcopy(A)
for i in range(1, N):
    for j in range(i):
        if A[i] > A[j]:
            # Extend the best subsequence ending at j with A[i] if better.
            sum[i] = max(sum[i], sum[j]+A[i])
print(max(sum)) | [
"rhyun9584@naver.com"
] | rhyun9584@naver.com |
78f7d9e47fc2f5dcf5e819f41a8d938b9c47dbe1 | 69a60cdf962de532d63aa6111ddd7e3f9663abf3 | /wagtail/tests/testapp/migrations/0024_validatedpage.py | 0dabd2129083748178ee9e2d6261712fec43eeed | [
"BSD-3-Clause"
] | permissive | JoshBarr/wagtail | 47b827dc7394a8ebda76a7cc40e343fcd181ad96 | 7b8fbf89dac69386dfeb57dd607f43ab42d1ffab | refs/heads/master | 2021-01-09T06:35:13.010607 | 2016-02-08T13:06:05 | 2016-02-08T13:06:05 | 33,353,097 | 1 | 2 | null | 2016-02-23T20:14:13 | 2015-04-03T07:49:11 | Python | UTF-8 | Python | false | false | 874 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-25 05:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration adding the ValidatedPage test model,
    # which subclasses wagtailcore.Page via a one-to-one parent link
    # (Django multi-table inheritance).

    dependencies = [
        ('wagtailcore', '0024_alter_page_content_type_on_delete_behaviour'),
        ('tests', '0023_mycustompage'),
    ]

    operations = [
        migrations.CreateModel(
            name='ValidatedPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('foo', models.CharField(max_length=255)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
| [
"matt@west.co.tt"
] | matt@west.co.tt |
ba9f1e97ab03afb6a5fec1f7644f0bf55edb7a46 | 17737f86e3b9c98f82037f1676faa1a41614aaa5 | /manage.py | 6b1404bafc43dc492222bff2db78395846b91151 | [] | no_license | tsh/django-ajax-simple-chat | ea2ab7402ee8637c6bf6da677c3fabdaee8930d2 | 0b38f2dbd5e5ec64451e780c40ec67abe86ab59f | refs/heads/master | 2023-09-04T02:36:53.800947 | 2014-01-24T17:40:06 | 2014-01-24T17:40:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before anything imports them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_Chat.settings")
    from django.core.management import execute_from_command_line
    # Dispatch to Django's management CLI with the process arguments.
    execute_from_command_line(sys.argv)
| [
"dr.tallin@gmail.com"
] | dr.tallin@gmail.com |
7e27264993c1ec43d5c131acb31c0dc06e4e296f | 2ea33df726a1a3867ad7fecfc334e8dfb15c4458 | /packages/gosamcontrib-toolfile/package.py | bc59b55f7013ff8217c73671c28bad048155f409 | [] | no_license | gartung/cmssw-spack | fe0ad7997777cfe58de273a5cd2e2aa368d1a1e2 | 37795d92596303197710fb043723c03775339220 | refs/heads/master | 2023-01-22T11:40:03.835528 | 2021-08-04T15:03:34 | 2021-08-04T15:03:34 | 78,494,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class GosamcontribToolfile(Package):
    """Spack package that writes a SCRAM toolfile for gosamcontrib."""

    # Placeholder fetch target: this package produces only a toolfile, so it
    # "downloads" a junk XML file shipped alongside the recipes.
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'

    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)

    depends_on('gosamcontrib')

    def install(self, spec, prefix):
        # Values substituted into the $VER / $PFX placeholders below.
        # NOTE(review): presumably string.Template semantics where $$
        # escapes a literal "$" — confirm in write_scram_toolfile.
        values = {}
        values['VER'] = spec['gosamcontrib'].version
        values['PFX'] = spec['gosamcontrib'].prefix

        fname = 'gosamcontrib.xml'

        contents = str("""
<tool name="gosamcontrib" version="$VER">
<client>
<environment name="GOSAMCONTRIB_BASE" default="$PFX"/>
<environment name="LIBDIR" default="$$GOSAMCONTRIB_BASE/lib"/>
<environment name="INCLUDE" default="$$GOSAMCONTRIB_BASE/include"/>
</client>
<runtime name="GOSAMCONTRIB_PATH" value="$$GOSAMCONTRIB_BASE" type="path"/>
<runtime name="ROOT_PATH" value="$$GOSAMCONTRIB_BASE" type="path"/>
<runtime name="ROOT_INCLUDE_PATH" value="$$INCLUDE" type="path"/>
</tool>
""")

        write_scram_toolfile(contents, values, fname, prefix)
| [
"gartung@fnal.gov"
] | gartung@fnal.gov |
0c14d4d2ac20db2756834a8f42fb89cdc30b4671 | dc0a6e23ee55c4009f18e7c646c0e41598dcd0a9 | /src/utils.py | b564e2db7d64dfda46fac24a2f607ea7d0d7834a | [] | no_license | danbailo/ic | aabf189de058814a78219e7b0946e234b21e91db | ef75e3f01c2cf2f001c657aa87bf98c969a656c0 | refs/heads/master | 2023-01-11T15:36:11.665681 | 2020-11-18T19:50:18 | 2020-11-18T19:50:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | import argparse
import os
def create_args():
    """Parse the process's command-line options and return them as a dict."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-v", "--video", type=str, help="path to input video file")
    return vars(parser.parse_args())
def delete_imgs():
    # Interactively delete every .jpg file inside the local "images"
    # directory. Accepts "s"/"y" in any case as confirmation; any other
    # answer is a no-op.
    op = input("Delete all images? ")
    if op.lower() in ["s","y"]:
        filelist = [ f for f in os.listdir("images") if f.endswith(".jpg") ]
        for f in filelist:
            os.remove(os.path.join("images", f))
def set_start_video(cap):
    # Ask for the cut start time in seconds and return it in milliseconds.
    # NOTE(review): the `cap` argument is unused here — confirm whether it
    # can be dropped at the call sites.
    start = float(input("Input the time that you wish start the cut: "))
    return start*1000.0

def get_end_video(cap):
    # Ask for the cut end time in seconds and return it in milliseconds.
    # NOTE(review): `cap` is likewise unused.
    end = float(input("Input the time that you wish end the cut: "))
    return end*1000.0

def add_pad():
    # Ask for an integer padding value and return it.
    return int(input("Input the value of the pad: "))
"danbailoufms@gmail.com"
] | danbailoufms@gmail.com |
9e62ec0fcaeb3720f83703d0b87c089994007002 | cbc4782342ad277b9f8cda805a57854ba3468edb | /SVDBias/SVDBias-surprise-ml.py | 1d901e4b3608f43e52e756d00bc9b4c71150066c | [] | no_license | qingkongmengnuan/BayesianRS | f7f1cdc7ca6336e3d18e98e441b65aa767846005 | e440f6bb26bdc9485d2ae15826c0900b7457b92d | refs/heads/master | 2022-11-30T20:00:20.305454 | 2020-08-11T08:25:11 | 2020-08-11T08:25:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,526 | py | # coding:utf-8
'''
@author: Jason.F
@data: 2019.07.15
@function: Implementing SVDBias with surprise lirbray
Dataset: Movielen-1m
Evaluating by hitradio,ndcg
'''
import numpy as np
import pandas as pd
import math
from collections import defaultdict
import heapq
import surprise as sp
#1. loading the dataset.
def load_dataset():
    """Load ml-1m training ratings and the leave-one-out test samples.

    Returns:
        (DataFrame, list): the (user, item, rating) training frame and a
        flat list of [user, item, label] test rows — for each user one
        positive item (label 1) followed by its sampled negatives (label 0).
    """
    # FIX: np.float was a deprecated alias of the builtin float and was
    # removed in NumPy 1.24; np.float64 is the dtype it resolved to.
    train_data = pd.read_csv("./data/ml-1m.train.rating",
                             sep='\t', header=None,
                             names=['user', 'item', 'rating'],
                             usecols=[0, 1, 2],
                             dtype={0: np.int32, 1: np.int32, 2: np.float64})
    test_data = []
    with open("./data/ml-1m.test.negative", 'r') as fd:
        for line in fd:
            # Skip blank lines instead of crashing on eval('\n').
            if not line.strip():
                continue
            arr = line.split('\t')
            # SECURITY NOTE: eval() on file content executes arbitrary code;
            # ast.literal_eval would be the safe drop-in for these "(u, i)"
            # tuple fields if the data source is not fully trusted.
            u = eval(arr[0])[0]
            test_data.append([u, eval(arr[0])[1], 1])  # one positive item
            for i in arr[1:]:
                test_data.append([u, int(i), 0])  # 99 negative items
    return train_data, test_data
train_data,test_set = load_dataset()
#2. Transforming into data format of surprise and spliting the train-set and test-set
# The columns must correspond to user id, item id and ratings (in that order).
reader = sp.Reader(rating_scale=(0, 5))
spdata = sp.Dataset.load_from_df(train_data,reader)
trainset = spdata.build_full_trainset()
#testset = np.array(testset).tolist()
#3.training and evaluating
def getHitRatio(ranklist, gtItem):
    """Return 1 if the ground-truth item appears in the ranked list, else 0."""
    return 1 if gtItem in ranklist else 0
def getNDCG(ranklist, gtItem):
    """Return the DCG credit log(2)/log(pos+2) for the hit position, or 0."""
    for pos, candidate in enumerate(ranklist):
        if candidate == gtItem:
            return math.log(2) / math.log(pos + 2)
    return 0
print ("%3s%20s%20s" % ('K','HR@10', 'NDCG@10'))
for K in [8,16,32,64]:#iterations epoches
algo = sp.SVD(n_factors=K, n_epochs=20, lr_all=0.001, reg_all=0.01 )#NMF,SVDpp
algo.fit(trainset)
#print (algo.predict(str(1),str(1), r_ui=0, verbose=True))
predictions = algo.test(test_set)#testset include one positive and 99 negtive sample of every user.
user_iid_true_est = defaultdict(list)
for uid, iid, true_r, est, _ in predictions:
user_iid_true_est[uid].append((iid, true_r, est))
hits = []
ndcgs = []
for uid, iid_ratings in user_iid_true_est.items():
# Sort user ratings by estimated value
#iid_ratings.sort(key=lambda x: x[2], reverse=True) #sorted by est
scorelist = []
positem = -1
for iid, ture_r, est in iid_ratings:
if positem == -1: positem=iid #one positive item in first
scorelist.append([iid,est])
map_item_score = {}
for item, rate in scorelist: #turn dict
map_item_score[item] = rate
ranklist = heapq.nlargest(10, map_item_score, key=map_item_score.get)#default Topn=10
hr = getHitRatio(ranklist, positem)
hits.append(hr)
ndcg = getNDCG(ranklist, positem)
ndcgs.append(ndcg)
hitratio,ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
print ("%3d%20.6f%20.6f" % (K, hitratio, ndcg))
'''
nohup python -u SVDBias-surprise-ml.py > SVDBias-surprise-ml.log &
K HR@10 NDCG@10
8 0.260430 0.127373
16 0.260430 0.127286
32 0.260927 0.127980
64 0.259603 0.126590
''' | [
"sharpsword@163.com"
] | sharpsword@163.com |
b953777105e563ce297fc573f61fb938f2f8ba97 | 2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f | /aws.ec2.NetworkInterfaceSecurityGroupAttachment-python/__main__.py | f1281ca3d2cd3bc2dcd7d4b7dfa5a2d565d096fe | [] | no_license | ehubbard/templates-aws | e323b693a18234defe6bd56ffcc64095dc58e3a1 | 2ae2e7a5d05490078017fed6d132dcdde1f21c63 | refs/heads/master | 2022-11-17T13:53:14.531872 | 2020-07-10T21:56:27 | 2020-07-10T21:56:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | import pulumi
import pulumi_aws as aws
ami = aws.get_ami(filters=[{
"name": "name",
"values": ["amzn-ami-hvm-*"],
}],
most_recent=True,
owners=["amazon"])
instance = aws.ec2.Instance("instance",
ami=ami.id,
instance_type="t2.micro",
tags={
"type": "test-instance",
})
sg = aws.ec2.SecurityGroup("sg", tags={
"type": "test-security-group",
})
sg_attachment = aws.ec2.NetworkInterfaceSecurityGroupAttachment("sgAttachment",
network_interface_id=instance.primary_network_interface_id,
security_group_id=sg.id)
| [
"jvp@justinvp.com"
] | jvp@justinvp.com |
e2c7d8563d77db0c7809b623e690dec82dce8949 | 31da199014d3fcbfa697c65743ceff554bd2bb75 | /cabinet/admin.py | 68a0b68e7d740b6ce6c82b558dad1a531f608402 | [
"BSD-2-Clause"
] | permissive | barseghyanartur/django-cabinet | 59c12d7e3aab80f6dd44ad1c6b53ceb85dd5bc5f | 38b4732c9b00d21718216271a2572a19ea35d25c | refs/heads/master | 2021-08-28T07:49:56.394240 | 2017-12-11T15:21:17 | 2017-12-11T15:21:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,383 | py | from django.contrib import admin
from django.template.defaultfilters import filesizeformat
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from cabinet.base_admin import FileAdminBase
from cabinet.models import File
@admin.register(File)
class FileAdmin(FileAdminBase):
list_display = (
'admin_thumbnail',
'admin_file_name',
'admin_details',
)
list_display_links = (
'admin_thumbnail',
'admin_file_name',
)
def get_fieldsets(self, request, obj=None):
return [
(None, {
'fields': [field for field in (
'folder',
'caption',
'copyright',
'_overwrite' if obj else '',
) if field],
}),
(_('Image'), {
'fields': ('image_file', 'image_alt_text'),
'classes': (
('collapse',)
if (obj and not obj.image_file.name)
else ()
),
}),
(_('Download'), {
'fields': ('download_file',),
'classes': (
('collapse',)
if (obj and not obj.download_file.name)
else ()
),
}),
]
def admin_thumbnail(self, instance):
if instance.image_file.name:
return format_html(
'<img src="{}" alt=""/>',
instance.image_file.crop['50x50'],
)
elif instance.download_file.name:
return format_html(
'<span class="download download-{}">{}</span>',
instance.download_type,
instance.download_type.upper(),
)
return ''
admin_thumbnail.short_description = ''
def admin_file_name(self, instance):
return format_html(
'{} <small>({})</small>',
instance.file_name,
filesizeformat(instance.file_size),
)
admin_file_name.short_description = _('file name')
def admin_details(self, instance):
return format_html(
'<small>{}<br>{}</small>',
instance.caption,
instance.copyright,
)
admin_details.short_description = _('details')
| [
"mk@feinheit.ch"
] | mk@feinheit.ch |
da96e40b01e37315d3107d46905dbca1c84906a4 | a349c67e63c1cf8203c938ea1b1c9fa4f10252fa | /admin_back/steam/migrations/0001_initial.py | 81fb50fbfcc2c2729dfb2297e6bcc1bb755b1eee | [] | no_license | shubham2704/TopaAcademy_ | a068b45e62f857786c0aa43f6c47dfea4cdd85d0 | 859ed88489dbabebf0318a53eabe91eae80297ca | refs/heads/master | 2023-01-11T22:58:33.052314 | 2019-11-10T11:52:22 | 2019-11-10T11:52:22 | 220,782,125 | 0 | 0 | null | 2022-12-10T08:26:33 | 2019-11-10T11:45:51 | JavaScript | UTF-8 | Python | false | false | 691 | py | # Generated by Django 2.2.2 on 2019-07-03 17:59
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Steam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('steam_name', models.CharField(max_length=75)),
('steam_link_id', models.IntegerField(max_length=10)),
('steam_status', models.CharField(max_length=25)),
('date', models.DateField(auto_now_add=True)),
],
),
]
| [
"rs188282@gmail.com"
] | rs188282@gmail.com |
4fc23e4af088971f09b34d9af4fec8adcf08a223 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_058/ch19_2019_08_16_20_39_45_330140.py | adab61593c78e274fb4aca5ff8ea32f41ea78ff3 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | from math import sin
from math import sqrt
def calcula_distancia_do_projetil(y,v,a):
p= (v**2)/(2*9.8)
s= sqrt(1+(2*9.8*y/((v**2)*(sin(a)**2)))
t= sin(2*a)
return p*s*t | [
"you@example.com"
] | you@example.com |
33a85e4ec2754e47ca9829202bec986216389e8a | e15768382ea9553ee5a2bd708e96796b3f3275d2 | /applications/ContactStructuralMechanicsApplication/tests/SmallTests.py | 0ba9e389f7a2766f5f04022b489ec84a5a03bcf3 | [] | no_license | KratosCSIC2016/Master | 2c8edcd0c6c5745d198bede2b7e8cac6811e6ede | f15871e1830829bb41fdcb3203f4db59147bee01 | refs/heads/master | 2020-06-17T20:08:41.482931 | 2016-12-13T12:03:49 | 2016-12-13T12:03:49 | 74,974,314 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,025 | py | import os
# Import Kratos
from KratosMultiphysics import *
# Import KratosUnittest
import KratosMultiphysics.KratosUnittest as KratosUnittest
import Kratos_Execute_Solid_Test as Execute_Test
# This utiltiy will control the execution scope in case we need to acces files or we depend
# on specific relative locations of the files.
# TODO: Should we move this to KratosUnittest?
class controlledExecutionScope:
def __init__(self, scope):
self.currentPath = os.getcwd()
self.scope = scope
def __enter__(self):
os.chdir(self.scope)
def __exit__(self, type, value, traceback):
os.chdir(self.currentPath)
class StructuralMechanichsTestFactory(KratosUnittest.TestCase):
def setUp(self):
# Within this location context:
with controlledExecutionScope(os.path.dirname(os.path.realpath(__file__))):
# Initialize GiD I/O
parameter_file = open(self.file_name + "_parameters.json", 'r')
ProjectParameters = Parameters(parameter_file.read())
# Creating the model part
self.test = Execute_Test.Kratos_Execute_Test(ProjectParameters)
def test_execution(self):
# Within this location context:
with controlledExecutionScope(os.path.dirname(os.path.realpath(__file__))):
self.test.Solve()
def tearDown(self):
pass
class BasicCATest(StructuralMechanichsTestFactory):
file_name = "CA_test/basic_CA_test"
class SolidCATest(StructuralMechanichsTestFactory):
file_name = "CA_test/solid_CA_test"
class SimplePatchTestContact(StructuralMechanichsTestFactory):
file_name = "contact_test_2D/simple_patch_test"
class SimplePatchNotMatchingATestContact(StructuralMechanichsTestFactory):
file_name = "contact_test_2D/simple_patch_notmatching_a_test"
class SimplePatchNotMatchingBTestContact(StructuralMechanichsTestFactory):
file_name = "contact_test_2D/simple_patch_notmatching_b_test"
class TaylorPatchTestContact(StructuralMechanichsTestFactory):
file_name = "contact_test_2D/taylor_patch_test"
class TaylorPatchDynamicTestContact(StructuralMechanichsTestFactory):
file_name = "contact_test_2D/taylor_patch_dynamic_test"
class HertzSimpleSphereTestContact(StructuralMechanichsTestFactory):
file_name = "contact_test_2D/simple_hertz_sphere_plate_test"
class HertzSimpleTestContact(StructuralMechanichsTestFactory):
file_name = "contact_test_2D/hertz_simple_test"
class HertzSphereTestContact(StructuralMechanichsTestFactory):
file_name = "contact_test_2D/hertz_sphere_plate_test"
class HertzCompleteTestContact(StructuralMechanichsTestFactory):
file_name = "contact_test_2D/hertz_complete_test"
class ThreeDPatchMatchingTestContact(StructuralMechanichsTestFactory):
file_name = "contact_test_3D/3D_contact_patch_matching_test"
class ThreeDPatchNotMatchingTestContact(StructuralMechanichsTestFactory):
file_name = "contact_test_3D/3D_contact_patch_nonmatching_test"
| [
"enriquebonetgil@hotmail.com"
] | enriquebonetgil@hotmail.com |
148f6a20ab8aa33250be698258046bce2d174ffb | ea9d5e38d55d7e69bcb4ae74bb3dfd3028fba4d3 | /closed/Alibaba/code/resnet50/tensorrt/ResNet50.py | 75122546670ae6df306c1587df690994bbec0d65 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ltechkorea/inference_results_v1.0 | cbe29d0f32c525b74525c1c215bf66d8385f3fd0 | 48e24f151f2625a579d34f0a721ad3698d173dbb | refs/heads/ltech | 2023-07-04T06:38:49.691181 | 2021-08-09T23:40:48 | 2021-08-10T01:33:22 | 387,295,024 | 0 | 0 | NOASSERTION | 2021-07-31T00:54:06 | 2021-07-19T00:20:19 | C++ | UTF-8 | Python | false | false | 5,029 | py | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorrt as trt
import os
import sys
import platform
import onnx
sys.path.insert(0, os.getcwd())
from importlib import import_module
from code.common import logging, dict_get, BENCHMARKS
from code.common import get_system
from code.common.builder import BenchmarkBuilder
RN50Calibrator = import_module("code.resnet50.tensorrt.calibrator").RN50Calibrator
RN50GraphSurgeon = import_module("code.resnet50.tensorrt.rn50_graphsurgeon").RN50GraphSurgeon
class ResNet50(BenchmarkBuilder):
"""Resnet50 engine builder."""
def __init__(self, args):
workspace_size = dict_get(args, "workspace_size", default=(1 << 30))
logging.info("Use workspace_size: {:}".format(workspace_size))
super().__init__(args, name=BENCHMARKS.ResNet50, workspace_size=workspace_size)
# Model path
self.model_path = dict_get(args, "model_path", default="build/models/ResNet50/resnet50_v1.onnx")
self.cache_file = None
self.need_calibration = False
if self.precision == "int8":
# Get calibrator variables
calib_batch_size = dict_get(self.args, "calib_batch_size", default=1)
calib_max_batches = dict_get(self.args, "calib_max_batches", default=500)
force_calibration = dict_get(self.args, "force_calibration", default=False)
cache_file = dict_get(self.args, "cache_file", default="code/resnet50/tensorrt/calibrator.cache")
preprocessed_data_dir = dict_get(self.args, "preprocessed_data_dir", default="build/preprocessed_data")
calib_data_map = dict_get(self.args, "calib_data_map", default="data_maps/imagenet/cal_map.txt")
calib_image_dir = os.path.join(preprocessed_data_dir, "imagenet/ResNet50/fp32")
# Set up calibrator
self.calibrator = RN50Calibrator(calib_batch_size=calib_batch_size, calib_max_batches=calib_max_batches,
force_calibration=force_calibration, cache_file=cache_file,
image_dir=calib_image_dir, calib_data_map=calib_data_map)
self.builder_config.int8_calibrator = self.calibrator
self.cache_file = cache_file
self.need_calibration = force_calibration or not os.path.exists(cache_file)
def initialize(self):
"""
Parse input ONNX file to a TRT network. Apply layer optimizations and fusion plugins on network.
"""
# Query system id for architecture
self.system = get_system()
self.gpu_arch = self.system.arch
# Create network.
self.network = self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
# Parse from onnx file.
parser = trt.OnnxParser(self.network, self.logger)
rn50_gs = RN50GraphSurgeon(self.model_path,
self.gpu_arch, self.device_type,
self.precision,
self.cache_file, self.need_calibration)
model = rn50_gs.process_onnx()
success = parser.parse(onnx._serialize(model))
if not success:
raise RuntimeError("ResNet50 onnx model processing failed! Error: {:}".format(parser.get_error(0).desc()))
# unmarking topk_layer_output_value, just leaving topk_layer_output_index
assert self.network.num_outputs == 2, "Two outputs expected"
assert self.network.get_output(0).name == "topk_layer_output_value",\
"unexpected tensor: {}".format(self.network.get_output(0).name)
assert self.network.get_output(1).name == "topk_layer_output_index",\
"unexpected tensor: {}".format(self.network.get_output(1).name)
logging.info("Unmarking output: {:}".format(self.network.get_output(0).name))
self.network.unmark_output(self.network.get_output(0))
# Set input dtype and format
input_tensor = self.network.get_input(0)
if self.input_dtype == "int8":
input_tensor.dtype = trt.int8
input_tensor.dynamic_range = (-128, 127)
if self.input_format == "linear":
input_tensor.allowed_formats = 1 << int(trt.TensorFormat.LINEAR)
elif self.input_format == "chw4":
input_tensor.allowed_formats = 1 << int(trt.TensorFormat.CHW4)
self.initialized = True
| [
"tjablin@google.com"
] | tjablin@google.com |
cdad24c6114bdf200c7a1307a44ec6a5fdd6ec85 | 2324d8e4544a9b813153ce0ed0f858972ea7f909 | /516-最长回文子序列.py | 0312c08a87b375e402d691bb4276da62e1ce97ac | [] | no_license | Terry-Ma/Leetcode | af8a4ad8059975f8d12b0351610336f1f5f01097 | cc7f41e2fb3ed5734c2a5af97e49a5bc17afbceb | refs/heads/master | 2021-08-10T16:40:20.482851 | 2021-07-03T08:35:56 | 2021-07-03T08:35:56 | 225,814,239 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | class Solution:
def longestPalindromeSubseq(self, s: str) -> int:
if len(s) <= 1:
return len(s)
dp = [[0] * len(s) for i in range(len(s))]
for i in range(len(s)):
dp[i][i] = 1
for i in range(len(s) - 2, -1, -1):
for j in range(i + 1, len(s)):
if s[i] == s[j]:
dp[i][j] = max(dp[i][j], 2 + dp[i + 1][j - 1])
else:
dp[i][j] = max(dp[i][j], dp[i + 1][j], dp[i][j - 1])
return dp[0][-1]
| [
"rssmyq@aliyun.com"
] | rssmyq@aliyun.com |
200279fd26cc47748b2257023c7ac14a3f65119b | 91e439530dcc8dd06e999fabf46a6418b928e2a9 | /bank.py | 53fe8f991d42e21ae8ae6b5c9f1a08453a8a592e | [] | no_license | bee-san/bank | c36a59eebe10baa42981a07f8bc457cb51a92c10 | 4db116e499f8593de5150a3e668d55e25d6678fa | refs/heads/master | 2021-09-17T10:00:35.502326 | 2018-06-30T13:51:07 | 2018-06-30T13:51:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,986 | py | import csv
from monzo.monzo import Monzo # Import Monzo Class
class finance():
"""
** ATTRIBUTES **
- initial_take_out
how much is initially taken out of the bank account (leaving 20%)
- updated_take_Out
this is the same as initial_take_out but is updated in the program when 10% is removed
- high_vol
10% of updated_take_out
remove 10% from updated_take_out
- invest
40% of updated_take_out
remove 40% from updated_take_out
- other_savings
the rest of the 50% (the full updated_take_out) or 50% of initial_take_out
** METHODS **
- __init__
constructor
- update_take_out
used to update the attribute of same name
- check
checks to see if the result of adding high_vol + invest + other_savings is = to initial_take_out
- printAll
prints all the attributes nicely
"""
def __init__(self, x):
""" Constructor method """
self.initial_natwest = x
latest_file = open("data.txt", "r")
latest_file_read = latest_file.read().split(" ")
latest = latest_file_read[0]
latest = float(latest)
self.natwest_balance = latest
# If you've put in £50 and theres £100 in the account this will mess it up
## that's why absolute is used.
# I also want to save 20% of the natwest account in the savings itself
newly_added = float(abs(self.initial_natwest - latest))
if self.natwest_balance > 600:
self.initial_take_out = float(newly_added)
self.more600 = True
self.over60020Percent = newly_added * 0.2
else:
self.initial_take_out = float(newly_added * 0.8)
self.updated_take_out = float(self.initial_take_out)
latest_file.close()
latest_file = open("data.txt", "w")
latest_file_read.append(str(abs(self.initial_natwest - self.initial_take_out)))
latest_file_data = ' '.join(latest_file_read)
latest_file.write(latest_file_data)
latest_file.close()
self.high_vol = 0.0
self.invest = 0.0
self.other_savings = 0.0
# vanguard requires at least £100 per month
if self.updated_take_out * 0.40 < 100.0:
if self.updated_take_out * 0.45 > 99.99:
self.invest = self.updated_take_out * 0.45
self.updateTakeout(self.invest)
self.high_vol = self.updated_take_out * 0.05
self.updateTakeout(self.high_vol)
elif self.updated_take_out * 0.5 > 99.99:
self.invest = self.updated_take_out * 0.5
self.updateTakeout(self.invest)
self.high_vol = self.updated_take_out * 0
self.updateTakeout(self.high_vol)
else:
self.invest = self.updated_take_out * 0.40
self.updateTakeout(self.invest)
self.high_vol = self.updated_take_out * 0.10
self.updateTakeout(self.high_vol)
self.left_with = self.initial_take_out - (self.invest + self.high_vol)
self.left_over = self.initial_natwest - self.initial_take_out
def updateTakeout(self, money):
""" Method to update the updated_take_out attrbutes"""
self.updated_take_out = self.updated_take_out - money
def check(self):
addition = self.other_savings + self.invest + self.high_vol
if addition == self.initial_take_out:
return True
else:
return False
def printAll(self):
print("\nYou should take out £" + str(round(self.initial_take_out)) + " out of your NatWest account")
if self.more600:
print("You should invest £" + str(self.over60020Percent) + " into something")
print("You should put £" + str(round(self.high_vol)) + " into cryptocurrencies")
if self.high_vol > 50:
print(" * Split 4 ways this is £" + str(round(self.high_vol / 4.00)))
print(" * Split 3 ways this is £" + str(round(self.high_vol / 3.0)))
print("You should put £" + str(round(self.invest)) + " into Vanguard LS 80")
print("This leaves you with £" + str(round(self.left_with)))
self.updateTakeout(30)
print("Taking away your subscriptions (£30) this is £" + str(round(self.updated_take_out)))
self.updateTakeout(75)
print("Take away £75 for your chocolate addiction this is £ " + str(round(self.updated_take_out)))
print("* 10% to put into a holiday pot is £" + str(round(self.updated_take_out * 0.10)))
print("* Split over 4 weeks this is £" + str(round(self.updated_take_out / 4.0)))
print("You should put roughly £" + str(round(self.updated_take_out / 11)) + " into each YNAB category")
print("Left in your NatWest is £" + str(round(self.left_over)))
if (self.left_with / 4.0) > 45.0:
print("Overall this was a good financial month, well done!!")
print("You should consider these things: ")
print(" * Putting 10% (£" + str(round(self.left_with * 0.10)) + ") into a holiday pot")
print(" * Reinvesting 20% (£" + str(round(self.left_with * 0.20)) + ") into the NatWest account")
else:
print("You'll have a better month ;)")
class monzoAPI():
"""
Monzo API in a class I've built
** ATTRIBUTES **
- spend_today
how much money was spent today
- balance
your current monzo balance
"""
def __init__(self):
client = Monzo('access_token_goes_here') # Replace access token with a valid token found at: https://developers.getmondo.co.uk/
account_id = client.get_first_account()['id'] # Get the ID of the first account linked to the access token
balance = client.get_balance(account_id) # Get your balance object
self.spend_today = balance['spend_today']
self.balance = balance['balance']
def get_transactions_api(self, account_id, client):
# gets transactions from monzo
return client.transactions(account_id)
def parse_transactions(self, transactions):
# parses the transactions into a list of dictionary items
transactionsParsed = []
for item in transactions:
# Merchant
try:
merchant = item.merchant.name
except AttributeError:
merchant = "No name"
except TypeError:
merchant = "No merchant for this item"
# Date
date = item.created
amount = float(item.amount)
transactionsParsed.append({
'date': date,
'transaction': amount,
'merchant': merchant})
return transactionsParsed
def sort_chronologically(transactions):
# sorts the transactions chronologically
transactions = sorted(transactions, key=lambda k: k['date'])
return transactions
def sort_months(transactions):
# Store the dates in stored_transactions with Y/M dates
sorted_transactions = {}
for transaction in transactions:
# Make a list of months
month = transaction['date'].strftime("%y/%m")
if month in sorted_transactions:
sorted_transactions[month].append(transaction)
else:
sorted_transactions[month] = [transaction]
print("sorted")
ordered_transactions = {}
for key in sorted(sorted_transactions.keys()):
print(key)
ordered_transactions[key] = sorted_transactions[key]
print("sorted")
print(ordered_transactions)
return ordered_transactions
x = float(input("How much money do you have in your account? "))
obj = finance(x)
obj.printAll()
| [
"brandonskerritt51@gmail.com"
] | brandonskerritt51@gmail.com |
8bae397672f78e82110159c58eca7b50e537f310 | 34811c84cf06e5b9cd0966fb1c7847e015608a72 | /monitoreo/simas/urls.py | 87b89970ac3a6a36e37654bac7adcba9f0d98046 | [] | no_license | fitoria/MonitoreoSimas | ba6f7e13b84d180d676713e9464796305201051b | 1570701eb425ca021b1f1df5ab9fa892b62de208 | refs/heads/master | 2021-01-18T06:12:41.905008 | 2011-05-19T20:48:58 | 2011-05-19T20:48:58 | 1,243,076 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | import os
from django.conf.urls.defaults import *
from django.conf import settings
from models import Encuesta
urlpatterns = patterns('monitoreo.simas.views',
(r'^index/$', 'inicio'),
(r'^index/ajax/organizaciones/(?P<departamento>\d+)/$', 'get_organizacion'),
(r'^index/ajax/municipio/(?P<departamento>\d+)/$', 'get_municipios'),
(r'^index/ajax/comunidad/(?P<municipio>\d+)/$', 'get_comunidad'),
#graficas para los indicadores
(r'^grafo/organizacion/(?P<tipo>\w+)/$', 'organizacion_grafos'),
(r'^grafo/agua-disponibilidad/(?P<tipo>\d+)/$', 'agua_grafos_disponibilidad'),
(r'^grafo/fincas/(?P<tipo>\w+)/$', 'fincas_grafos'),
(r'^grafo/arboles/(?P<tipo>\w+)/$', 'arboles_grafos'),
(r'^grafo/manejosuelo/(?P<tipo>\w+)/$', 'grafo_manejosuelo'),
(r'^grafo/ingreso/(?P<tipo>\w+)/$', 'grafos_ingreso'),
(r'^grafo/bienes/(?P<tipo>\w+)/$', 'grafos_bienes'),
(r'^grafo/ahorro-credito/(?P<tipo>\w+)/$', 'ahorro_credito_grafos'),
(r'^mapa/$', 'obtener_lista'),
(r'^(?P<vista>\w+)/$', '_get_view'),
)
| [
"carcas@sacrac.info"
] | carcas@sacrac.info |
bb6d634403b1b873f897897d279b35f55ec05aa7 | e42197009793944a7d54873c71999daa787ca897 | /src/app.py | d955ebb428b52bb4e680247d49eccda612ca8153 | [
"Apache-2.0"
] | permissive | kferrone/kubernetes-webhook-examples | a686a49bb27ef9efe7b926e793623a08f2f33379 | c1a4c817badb0b410d0cc7fa57a5fd4a185f6325 | refs/heads/master | 2023-04-11T00:39:25.091165 | 2019-08-29T12:14:28 | 2019-08-29T12:14:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,518 | py | import base64
import copy
import http
import json
import random
import jsonpatch
from flask import Flask, jsonify, request
app = Flask(__name__)
@app.route("/validate", methods=["POST"])
def validate():
allowed = True
try:
for container_spec in request.json["request"]["object"]["spec"]["containers"]:
if "env" in container_spec:
allowed = False
except KeyError:
pass
return jsonify(
{
"response": {
"allowed": allowed,
"uid": request.json["request"]["uid"],
"status": {"message": "env keys are prohibited"},
}
}
)
@app.route("/mutate", methods=["POST"])
def mutate():
spec = request.json["request"]["object"]
modified_spec = copy.deepcopy(spec)
try:
modified_spec["metadata"]["labels"]["example.com/new-label"] = str(
random.randint(1, 1000)
)
except KeyError:
pass
patch = jsonpatch.JsonPatch.from_diff(spec, modified_spec)
return jsonify(
{
"response": {
"allowed": True,
"uid": request.json["request"]["uid"],
"patch": base64.b64encode(str(patch).encode()).decode(),
"patchtype": "JSONPatch",
}
}
)
@app.route("/health", methods=["GET"])
def health():
return ("", http.HTTPStatus.NO_CONTENT)
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True) # pragma: no cover
| [
"gareth@morethanseven.net"
] | gareth@morethanseven.net |
dc85d1535c4d5fb53bc4ac15c57457735f27d609 | 5a5f62501ac77ba50a9661ed54322748a598438a | /SUASImageParser/modules/log_parser.py | 334df75b85e39f9376f9aef9a7a6ad7bd21e379e | [
"MIT"
] | permissive | OpenGelo/SUAS-Competition | fe5b2a283eb4f7dc7be666194e7ff909e85d2d1f | 75eef01223bda382381bfe160cc2e19b022397f8 | refs/heads/master | 2021-01-20T16:21:31.305561 | 2016-05-05T14:37:16 | 2016-05-05T14:37:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import cv2
class PixhawkLogParser:
"""
Parses Pixhawk's logs to identify vital information to the mission.
"""
def __init__(self):
self.log = ""
def parse(self, logfile, condition_to_trigger):
"""
Parse the log files looking for the following components:
1) GPS location
2) Heading
The method uses the condition "condition_to_trigger" to determine
when the above data needs to be captured. It grabs data when
appropriate, adds it to a list of dictionaries, and finally returns
that list.
"""
# Returning the parsed log data
return [{}]
| [
"valetolpegin@gmail.com"
] | valetolpegin@gmail.com |
7e126dca8e793f832da0623feaad6843af2eadc2 | 7896baeb297e131bab53cfbff712d1fd77bccede | /magenta/models/image_stylization/image_stylization_train_with_mask.py | d86cf95b9e2d8fdb1b13053b4d2a451ccd5cb3b3 | [
"Apache-2.0"
] | permissive | gombru/magenta_styleTransfer | 599b85b24dd406a82df271bb769fe3dc1fa19f0b | bd41b0bf3bb18988653e4a355d95dac8632e814f | refs/heads/master | 2020-04-11T23:11:47.133793 | 2019-02-12T12:12:50 | 2019-02-12T12:12:50 | 162,159,299 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,158 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains the N-styles style transfer model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import os
import tensorflow as tf
from magenta.models.image_stylization import image_utils
from magenta.models.image_stylization import learning
from magenta.models.image_stylization import model
from magenta.models.image_stylization import vgg
slim = tf.contrib.slim
DEFAULT_CONTENT_WEIGHTS = '{"vgg_16/conv3": 1.0}'
DEFAULT_STYLE_WEIGHTS = ('{"vgg_16/conv1": 1e-4, "vgg_16/conv2": 1e-4,'
' "vgg_16/conv3": 1e-4, "vgg_16/conv4": 1e-4}')
flags = tf.app.flags
flags.DEFINE_float('clip_gradient_norm', 0, 'Clip gradients to this norm')
flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate')
flags.DEFINE_integer('batch_size', 16, 'Batch size.')
flags.DEFINE_integer('image_size', 256, 'Image size.')
flags.DEFINE_integer('ps_tasks', 0,
'Number of parameter servers. If 0, parameters '
'are handled locally by the worker.')
flags.DEFINE_integer('num_styles', None, 'Number of styles.')
flags.DEFINE_integer('save_summaries_secs', 15,
'Frequency at which summaries are saved, in seconds.')
flags.DEFINE_integer('save_interval_secs', 15,
'Frequency at which the model is saved, in seconds.')
flags.DEFINE_integer('task', 0,
'Task ID. Used when training with multiple '
'workers to identify each worker.')
flags.DEFINE_integer('train_steps', 40000, 'Number of training steps.')
flags.DEFINE_string('content_weights', DEFAULT_CONTENT_WEIGHTS,
'Content weights')
flags.DEFINE_string('master', '',
'Name of the TensorFlow master to use.')
flags.DEFINE_string('style_coefficients', None,
'Scales the style weights conditioned on the style image.')
flags.DEFINE_string('style_dataset_file', None, 'Style dataset file.')
flags.DEFINE_string('style_weights', DEFAULT_STYLE_WEIGHTS, 'Style weights')
flags.DEFINE_string('train_dir', None,
'Directory for checkpoints and summaries.')
FLAGS = flags.FLAGS
def main(unused_argv=None):
with tf.Graph().as_default():
# Force all input processing onto CPU in order to reserve the GPU for the
# forward inference and back-propagation.
device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,
worker_device=device)):
print("Using COCOText dataset")
inputs, masks = image_utils.COCOText_inputs(FLAGS.batch_size,
FLAGS.image_size)
# Load style images and select one at random (for each graph execution, a
# new random selection occurs)
_, style_labels, style_gram_matrices = image_utils.style_image_inputs(
os.path.expanduser(FLAGS.style_dataset_file),
batch_size=FLAGS.batch_size, image_size=FLAGS.image_size,
square_crop=True, shuffle=True)
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
# Process style and weight flags
num_styles = FLAGS.num_styles
if FLAGS.style_coefficients is None:
style_coefficients = [1.0 for _ in range(num_styles)]
else:
style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
if len(style_coefficients) != num_styles:
raise ValueError(
'number of style coefficients differs from number of styles')
content_weights = ast.literal_eval(FLAGS.content_weights)
style_weights = ast.literal_eval(FLAGS.style_weights)
# Rescale style weights dynamically based on the current style image
style_coefficient = tf.gather(
tf.constant(style_coefficients), style_labels)
style_weights = dict((key, style_coefficient * value)
for key, value in style_weights.iteritems())
# Define the model
stylized_inputs = model.transform(
inputs,
normalizer_params={
'labels': style_labels,
'num_categories': num_styles,
'center': True,
'scale': True})
# Compute losses.
total_loss, loss_dict = learning.total_loss(
inputs, stylized_inputs, style_gram_matrices, content_weights,
style_weights)
for key, value in loss_dict.iteritems():
tf.summary.scalar(key, value)
# Set up training
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
train_op = slim.learning.create_train_op(
total_loss, optimizer, clip_gradient_norm=FLAGS.clip_gradient_norm,
summarize_gradients=False)
# Function to restore VGG16 parameters
# TODO(iansimon): This is ugly, but assign_from_checkpoint_fn doesn't
# exist yet.
saver = tf.train.Saver(slim.get_variables('vgg_16'))
def init_fn(session):
saver.restore(session, vgg.checkpoint_file())
# Run training
slim.learning.train(
train_op=train_op,
logdir=os.path.expanduser(FLAGS.train_dir),
master=FLAGS.master,
is_chief=FLAGS.task == 0,
number_of_steps=FLAGS.train_steps,
init_fn=init_fn,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs)
def console_entry_point():
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
| [
"raulgombru@gmail.com"
] | raulgombru@gmail.com |
e491f2bc8734c96214c5a43225e5aa586056dfbe | 2a318f4c8372c75224b2d79106ef52d8f4375e71 | /python/get_childfolders.py | 791db7a1c597c37eaff85ff6824fc483f8880fa2 | [] | no_license | keyur32/graph-snippets | 0d4bacc66b5fb0bbfddb73695fa61a5538eaf038 | e416d3ad86abdb30449325c06758e8cc6d73c137 | refs/heads/master | 2021-01-23T05:29:59.155567 | 2017-06-01T02:11:23 | 2017-06-01T02:11:23 | 92,971,791 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | import http.client
conn = http.client.HTTPSConnection("graph.microsoft.com")
conn.request("GET", "/v1.0/me/mailFolders/%7Bid%7D/childFolders")
res = conn.getresponse()
data = res.read()
print(data.decode("utf-8"))
| [
"keyur32@hotmail.com"
] | keyur32@hotmail.com |
87fd0e1e366ef2caef73ad6ac6b0ab25bc98a9e6 | 13d93c2922005af35056d015f1ae3ebebe05ee31 | /python/oreilly/cours_python/chap04/fibo1.py | d1eeb07bc2b72494a1ed0c68ba53211f802586da | [] | no_license | scls19fr/openphysic | 647cc2cdadbdafd050d178e02bc3873bd2b07445 | 67bdb548574f4feecb99b60995238f12f4ef26da | refs/heads/master | 2021-04-30T23:16:26.197961 | 2020-11-16T20:21:17 | 2020-11-16T20:21:17 | 32,207,155 | 1 | 1 | null | null | null | null | ISO-8859-1 | Python | false | false | 551 | py | #! /usr/bin/env python
# -*- coding: Latin-1 -*-
# Premier essai de script Python
# petit programme simple affichant une suite de Fibonacci, c.à.d. une suite
# de nombres dont chaque terme est égal à la somme des deux précédents.
print "Suite de Fibonacci :"
a,b,c = 1,1,1 # a & b servent au calcul des termes successifs
# c est un simple compteur
print 1 # affichage du premier terme
while c<15: # nous afficherons 15 termes au total
a,b,c = b,a+b,c+1
print b
| [
"s.celles@gmail.com@41f3eeec-7763-abce-c6e2-0c955b6d8259"
] | s.celles@gmail.com@41f3eeec-7763-abce-c6e2-0c955b6d8259 |
8d6ac2e0515b7df04cefef10ee64a54bfd319ad8 | 53983c1dbd4e27d918237d22287f1838ae42cc92 | /demo/plot.py | 5ac6310fad8fe1b293a3bb3c7d36e789ee28fadb | [] | no_license | xshii/MDAOXS | da5060ea6b6ac600b3b85dddbb7460f62ab4a684 | d4c54b79d7c84740bf01d8e8573e54522de2e6d0 | refs/heads/master | 2021-09-24T10:35:31.295574 | 2018-10-08T10:54:44 | 2018-10-08T10:54:44 | 108,884,304 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | import numpy as np
FORCE_FILE = "/Users/gakki/Dropbox/thesis/surface_flow_sort.csv"
info = np.loadtxt(FORCE_FILE,delimiter=',',skiprows=1)
num_elements = info.__len__()
num_nodes = info.__len__()
force_dict = {}
force_dict['GLOBALIDX'] = np.array(info[:,0],dtype=int)
force_dict['X'] = info[:,1]
force_dict['Y'] = info[:,2]
force_dict['PRESS'] = info[:,3]
force_dict['PRESSCO'] = info[:,4]
force_dict['MACHNUM'] = info[:,5]
from util.plot import *
plt.figure(2)
X = force_dict['X']
Y = force_dict['Y']
plt = twoDPlot(X,Y,plotstyle='scatter',xlabel='x',ylabel='y',label='',s=1,c='b')
plt.axis('equal')
finalizePlot(plt,title='Shape of NACA64a203',savefig=True,fname='NACA64a203.eps')
| [
"xshi@kth.se"
] | xshi@kth.se |
b5cbf11fb7b1ecff4f136de303477a0e972a7af8 | 00cd46c5722fbb4623d8cefc33bbce6e4c6bf970 | /Medium/70. Binary Tree Level Order Traversal II/Solution.py | 32d67bd39c759cadff9b855c310b3e12769d5848 | [
"MIT"
] | permissive | jxhangithub/lintcode | 9126d0d951cdc69cd5f061799313f1a96ffe5ab8 | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | refs/heads/master | 2022-04-02T22:02:57.515169 | 2020-02-26T21:32:02 | 2020-02-26T21:32:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,609 | py | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: A tree
@return: buttom-up level order a list of lists of integer
"""
def levelOrderBottom(self, root):
# write your code here
queue = []
result = []
if root is None:
return result
queue.append(root)
while queue:
level = []
size = len(queue)
for i in range(size):
# pop queue
node = queue.pop(0)
level.append(node.val)
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
result.insert(0, level)
return result
class Solution:
"""
@param root: A tree
@return: buttom-up level order a list of lists of integer
"""
def levelOrderBottom(self, root):
# write your code here
queue = []
stack = []
if root is None:
return stack
queue.append(root)
while queue:
level = []
size = len(queue)
for i in range(size):
# pop queue
node = queue.pop(0)
level.insert(0, node.val)
if node.right:
queue.append(node.right)
if node.left:
queue.append(node.left)
stack.append(level)
return stack[::-1]
| [
"nazhenye@gmail.com"
] | nazhenye@gmail.com |
dc20f889c44e724734de93e3c50622e58953615b | ead6ec54c304046e8017289ecae2acb69f2e463d | /examples/plot_expression_vs_inconsistent_splicing.py | 2cbc43e5fdf0860ab5f93a19da28d89f2488e1bd | [] | permissive | YeoLab/flotilla | 93e3576002f1b51917bc8576897d399176e1fa3a | 31da64567e59003c2b9c03fc8f4eb27ee62e299c | refs/heads/master | 2023-04-28T04:23:30.408159 | 2017-04-19T07:03:03 | 2017-04-19T07:03:03 | 19,319,564 | 104 | 27 | BSD-3-Clause | 2023-04-15T19:16:52 | 2014-04-30T16:14:31 | Jupyter Notebook | UTF-8 | Python | false | false | 308 | py | """
Show percentage of splicing events whose psi scores are inconsistent between pooled and single
==============================================================================================
"""
import flotilla
study = flotilla.embark(flotilla._shalek2013)
study.plot_expression_vs_inconsistent_splicing() | [
"olga.botvinnik@gmail.com"
] | olga.botvinnik@gmail.com |
cb1d46fd4ac57eda6a865a2a39ceb56d92a15920 | bd4dcd90d41aa228f0384c9ba03edd105a93d7ec | /checkout/migrations/0023_auto_20200306_0042.py | fb7d6f1e8c278ffe0c123a28a64a9e4eb78cd84b | [] | no_license | deganoth/mu-shop | 0be0bb0cfa635986b37edbe371daf8373f09aefd | dc1a77ecf6217286c005d762b559fe3f61ef2f6d | refs/heads/master | 2023-02-17T08:23:36.339586 | 2023-01-10T17:51:21 | 2023-01-10T17:51:21 | 243,972,792 | 0 | 1 | null | 2023-02-15T23:10:09 | 2020-02-29T13:22:02 | Python | UTF-8 | Python | false | false | 443 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-03-06 00:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('checkout', '0022_auto_20200305_2100'),
]
operations = [
migrations.RenameField(
model_name='orderlineitem',
old_name='big_total',
new_name='sub_total',
),
]
| [
"oliver.deegan@gmail.com"
] | oliver.deegan@gmail.com |
c097a069919c268b3216715892888aaeac7ad2c6 | 422dd5d3c48a608b093cbfa92085e95a105a5752 | /students/mark_luckeroth/lesson08/mailroom/mailroom_neo4j/initial_donor_data.py | 4e5607319fe29a51cad05a59db5525a69396fd06 | [] | no_license | UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018 | a2052fdecd187d7dd6dbe6f1387b4f7341623e93 | b1fea0309b3495b3e1dc167d7029bc9e4b6f00f1 | refs/heads/master | 2021-06-07T09:06:21.100330 | 2019-11-08T23:42:42 | 2019-11-08T23:42:42 | 130,731,872 | 4 | 70 | null | 2021-06-01T22:29:19 | 2018-04-23T17:24:22 | Python | UTF-8 | Python | false | false | 625 | py | """
Data for database initialization
"""
def get_donor_data():
"""
demonstration data
"""
donor_data = [
{
'name': 'Peter Pan',
'amount': [10., 10., 10., 10.]
},
{
'name': 'Paul Hollywood',
'amount': [5., 5000., 5., 5.]
},
{
'name': 'Mary Berry',
'amount': [100.]
},
{
'name': 'Jake Turtle',
'amount': [123., 456., 789.]
},
{
'name': 'Raja Koduri',
'amount': [60., 60000.]
}
]
return donor_data | [
"mluckeroth@gmail.com"
] | mluckeroth@gmail.com |
7dc55a929586528ad8c370d7547fbc9424219378 | 9249947c07f8addf64dd3d2a2f9f37d379f83921 | /client_tools/svc/mgmt_Encryption.py | 103105ba9875d2dc7422365f420a93bb88511ca6 | [
"MIT"
] | permissive | operepo/ope | eb71aa763d157416009d7c3052ace11852660e0a | 018c82af46845315795c67c36801e2a128f515d5 | refs/heads/master | 2023-08-08T15:05:28.592589 | 2023-07-25T00:22:24 | 2023-07-25T00:22:24 | 96,855,111 | 12 | 11 | MIT | 2023-03-03T15:10:34 | 2017-07-11T05:42:14 | Perl | UTF-8 | Python | false | false | 3,026 | py |
import os
import pyaes as AES
import threading
import base64
from color import p
def fast_urandom16(urandom=[], locker=threading.RLock()):
"""
this is 4x faster than calling os.urandom(16) and prevents
the "too many files open" issue with concurrent access to os.urandom()
"""
try:
return urandom.pop()
except IndexError:
try:
locker.acquire()
ur = os.urandom(16 * 1024)
urandom += [ur[i:i + 16] for i in range(16, 1024 * 16, 16)]
return ur[0:16]
finally:
locker.release()
def pad(s, n=32, padchar=' '):
if len(s) == 0:
# Handle empty value - pad it out w empty data
s += padchar * n
return s
while ((len(s) % n) != 0):
s += padchar
#pad_len = len(s) % 32 # How many characters do we need to pad out to a multiple of 32
#if (pad_len != 0):
# #return s + (32 - len(s) % 32) * padchar
# return s + (
return s
def AES_new(key, iv=None):
""" Returns an AES cipher object and random IV if None specified """
if iv is None:
iv = fast_urandom16()
# return AES.new(key, AES.MODE_CBC, IV), IV
# Util.aes = pyaes.AESModeOfOperationCBC(key, iv = iv)
# plaintext = "TextMustBe16Byte"
# ciphertext = aes.encrypt(plaintext)
if not isinstance(key, bytes):
key = key.encode('utf-8')
return AES.AESModeOfOperationOFB(key, iv = iv), iv
class Encryption:
@staticmethod
def encrypt(data, key):
key = pad(key[:32])
cipher, iv = AES_new(key)
encrypted_data = iv + cipher.encrypt(pad(data, 16))
return base64.urlsafe_b64encode(encrypted_data)
@staticmethod
def decrypt(data, key):
key = pad(key[:32])
if data is None:
data = ""
try:
data = base64.urlsafe_b64decode(data)
except TypeError as ex:
# Don't let error blow things up
pass
iv, data = data[:16], data[16:]
try:
cipher, _ = AES_new(key, iv=iv)
except:
# bad IV = bad data
return "" # data
try:
data = cipher.decrypt(data)
except:
# Don't let error blow things up
return ""
pass
if isinstance(data, bytes):
#p("is bytes")
try:
data = data.decode('utf-8')
#p("f")
except:
p("err decoding encrypted data as utf-8")
try:
data = data.decode('ascii')
except:
p("err decoding encrypted data as ascii", log_level=5)
try:
data = data.decode('latin-1')
except:
p("err decoding encrypted data as latin-1 - returning raw data",
log_level=4)
data = str(data)
data = data.rstrip(' ')
return data | [
"ray@cmagic.biz"
] | ray@cmagic.biz |
769219adae4387ad2ba40f50b8ca9c1c48888478 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/296/95757/submittedfiles/testes.py | e500c6c53d33aa7c9f97e8ef4d2d530847785cdd | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
#ENTRADA
#n = int(input("Digite o valor de n: "))
#a = []
#for i in range(0,n,1):
# a.append(int(input('Digite um valor: ')))
#media = sum(a)/len(a)
#soma = 0
#for i in range(0,n,1):
# soma += (a[i] - media)**2
# desvio = ((1/(n-1)*soma))**0.5
#print(desvio)
n = int(input("Digite a quantidade de elementos das listas: "))
while n<=1:
n = int(input("Digite a quantidade de elementos das listas: "))
a = []
for i in range (0,n,1):
a.append(int(input("Digite um valor para a lista a: ")))
ordenados = sorted(a)
print(ordenados)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
96bd5bd6157df167e82e703da1539aa517133836 | abe8db893dcb244a95fef47b611c4527b591eae4 | /env_log.py | 123a310207555d60a46f9dd1c4959baf54529d45 | [] | no_license | JohnMwashuma/raspberry-pi-home-automation-system | e057afa315e86c70e39ce3f0e46dc2e2e70f8ea9 | 6f889b7417c73fdbb142556d5f23083a64819bba | refs/heads/main | 2023-07-08T06:33:54.353884 | 2021-08-16T22:50:30 | 2021-08-16T22:50:30 | 396,997,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | import sqlite3
import sys
import adafruit_dht
import board
import pickle
import os
import RPi.GPIO as GPIO
import time
basedir = os.path.abspath(os.path.dirname(__file__))
weather_pred_model_file = os.path.join(basedir, 'static/ml_models/weather_pred_model.pkl')
with open(weather_pred_model_file, 'rb') as file:
weather_pred_model = pickle.load(file)
def log_values(sensor_id, temp, hum, rain):
conn = sqlite3.connect('/var/www/weather_app/weather_app.db')
curs = conn.cursor()
curs.execute("""INSERT INTO weather values(datetime(CURRENT_TIMESTAMP, 'localtime'), (?), (?), (?), (?))""", (sensor_id, temp, hum, rain))
conn.commit()
conn.close()
def predict_rain(hum, temp):
return "Yes" if weather_pred_model.predict([[hum, temp]])[0] else "No"
dht_device = adafruit_dht.DHT22(board.D17)
humidity = dht_device.humidity
temperature = dht_device.temperature
rain = "No"
if humidity is not None and temperature is not None:
rain = predict_rain(humidity, temperature)
log_values("1", temperature, humidity, rain)
else:
log_values("1", -999, -999, "No")
def spin_motor():
GPIO.setwarnings(False)
servo_pin = 27
mode = GPIO.getmode()
# check if the GPIO mode has not been set to GPIO.BCM
if (mode != 11):
GPIO.setmode(GPIO.BOARD)
servo_pin = 13
GPIO.setup(servo_pin, GPIO.OUT)
pwd = GPIO.PWM(servo_pin, 50)
pwd.start(0)
duty = 2
while (duty <= 12):
for i in range(0, 180):
DC = 1./18.*(i) + duty
pwd.ChangeDutyCycle(DC)
time.sleep(0.02)
for i in range(180, 0, -1):
DC = 1./18.*(i) + duty
pwd.ChangeDutyCycle(DC)
time.sleep(0.02)
duty = duty + 1
pwd.stop()
GPIO.cleanup()
if rain == "No":
# Spin motor
spin_motor()
| [
"jmwashuma@live.com"
] | jmwashuma@live.com |
8050a9e913c22b8feb35fa71d990b997b2a8cf43 | 9bbb00c09aaaa19565d3fb8091af568decb5820f | /8_Introduction_To_Data_Visualization_With_Seaborn/3_Visualizing_A_Categorical_And_A_Quantitative_Variable/6_Adjusting_The_Whiskers.py | 1d4d59cccf0bb3ca2373e18884ab62c217e56e7e | [] | no_license | PeterL64/UCDDataAnalytics | 4417fdeda9c64c2f350a5ba53b2a01b4bdc36fc7 | d6ff568e966caf954323ecf641769b7c79ccb83a | refs/heads/master | 2023-06-14T04:10:41.575025 | 2021-07-07T15:23:50 | 2021-07-07T15:23:50 | 349,780,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | # Adjusting the Whiskers
import matplotlib.pyplot as plt
import seaborn as sns
sns.catplot(x="romantic", y="G3",
data=student_data,
kind="box")
# Adjust the code to make the box plot whiskers to extend to 0.5 * IQR. Recall: the IQR is the interquartile range.
sns.catplot(x="romantic", y="G3",
data=student_data,
kind="box", whis=0.5)
plt.show()
# Change the code to set the whiskers to extend to the 5th and 95th percentiles.
sns.catplot(x="romantic", y="G3",
data=student_data,
kind="box", whis=[5,95])
plt.show()
# Change the code to set the whiskers to extend to the min and max values.
sns.catplot(x="romantic", y="G3",
data=student_data,
kind="box", whis=[0, 100])
plt.show() | [
"peterlyonscbar@gmail.com"
] | peterlyonscbar@gmail.com |
2172ea23665176c578172d05c7209180cf0bda9a | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/kepler_j184307+425918/sdB_kepler_j184307+425918_coadd.py | 9551f6bef545a195fef94c13c21906800ee1465f | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[280.779167,42.988333], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_kepler_j184307+425918/sdB_kepler_j184307+425918_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_kepler_j184307+425918/sdB_kepler_j184307+425918_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
cb48d1aa3eea8abd662c8a3212d0cea4ee4a0e02 | 245381ad175dcc03ee0710964340eed4daa2ef85 | /accounts/all/expenses/migrations/0001_initial.py | 4bd9f71bd00c98b1fabbc6d6ba7d680a7be15c40 | [] | no_license | musabansari-1/Shagroup-erp-backend | 2c1f56f7ce5763dae668d160cdcc1a26dbc2e8d7 | 87845f11faae50301d5bb73ffa0c3ee0bed38256 | refs/heads/main | 2023-04-13T02:25:36.808755 | 2021-04-15T16:28:19 | 2021-04-15T16:28:19 | 358,324,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | # Generated by Django 3.1.2 on 2020-11-27 11:06
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AccountHead',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='Expense',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField()),
('paid_by', models.CharField(max_length=40)),
('paid_date', models.DateField()),
('paid_amount', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
('paid_status', models.BooleanField(default=False)),
('account_head', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='expenses.accounthead')),
],
),
]
| [
"musabzahida@gmail.com"
] | musabzahida@gmail.com |
9a00bd567fb868335fe522f381502191a0a21c24 | 3a9f2b3d79cf214704829427ee280f4b49dca70a | /saigon/rat/RuckusAutoTest/components/lib/fmdv/rate_limiting_mgmt.py | 81486feffc93fc8b5e7a464da6cf053afb738d39 | [] | no_license | jichunwei/MyGitHub-1 | ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791 | f826fc89a030c6c4e08052d2d43af0b1b4b410e3 | refs/heads/master | 2021-01-21T10:19:22.900905 | 2016-08-20T03:34:52 | 2016-08-20T03:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,832 | py | import logging
import time
from pprint import pprint, pformat
from RuckusAutoTest.common.utils import *
from RuckusAutoTest.components.lib.AutoConfig import * # rarely reloading, so import *
from RuckusAutoTest.components.lib.fm.config_mapper_fm_old import map_cfg_value
# %s = 0 to 7
# 0 --> Wireless 1
# 1 --> Wireless 2
# 2 --> Wireless 3
# ..............
# 7 --> Wireless 8
TabTmpl = "//div[@dojoinsertionindex='%s']"
Locators = dict(
edit_btn = TabTmpl + "//div[@class='dojoButton' and div='Edit Settings']",
back_btn = TabTmpl + "//div[@class='dojoButton' and div='Back']",
submit_btn = TabTmpl + "//div[@class='dojoButton' and div='Submit']",
reset_btn = TabTmpl + "//div[@class='dojoButton' and div='Reset']",
downlink = Ctrl(TabTmpl + "//select[contains(@name, 'RateDown')]",
type = 'select'),
uplink = Ctrl(TabTmpl + "//select[contains(@name, 'RateUp')]", type = 'select'),
tab_link_tmpl = TabTmpl + "//span[1]",
)
def _nav_to(dv, wlan = 0, force = True):
'''
- wlan: from 0 to 7
0 -> Wireless 1
1 -> Wireless 2
...............
7 -> Wireless 8
'''
dv.navigate_to(dv.DETAILS, dv.DETAILS_RATE_LIMITING, force = force)
dv.selenium.click_and_wait(Locators['tab_link_tmpl'] % wlan)
def _get_locs(wlan = 0):
'''
This function is to map locator of Rate Limiting WLAN items
wlan:
0: Wireless 1,
1: Wireless 2,
...
7: Wireless 8,
output:
- return locator for wlan n
'''
return formatCtrl(Locators, [wlan])
def get_cfg(dv, wlan = 1, cfg_keys = []):
'''
This fucntion is to get items having keys provided in cfg_keys. if cfg_keys is empty,
it gets all times
Input:
- dv: Device View instance
- wlan: wlan to get its items. Value to pass for this param is from 1 to 8
- cfg_keys: a list, keys of items to get
Output:
- return a dictionary of items
'''
logging.info('Get Rate Limiting items of %s config' % wlan)
s, l = dv.selenium, _get_locs(wlan - 1)
_nav_to(dv, wlan - 1, True)
s.click_and_wait(l['edit_btn'])
cfg = get(s, l, cfg_keys if cfg_keys else ['uplink', 'downlink'])
return map_cfg_value(cfg, False)
def set_cfg(dv, wlan, cfg):
'''
This function is to set cfg for Rate Limiting of a wlan.
Input:
- dv: Device View instance,
- wlan: wlan to set cfg.
- cfg: a dictionary of configuration to set
Output:
- Return (task status, message)
'''
logging.info('Set Rate Limiting for wlan %s. Cfg: %s' % (wlan, pformat(cfg)))
s, l = dv.selenium, _get_locs(wlan - 1)
_nav_to(dv, wlan - 1, True)
s.click_and_wait(l['edit_btn'])
set(s, l, map_cfg_value(cfg))
s.click_and_wait(l['submit_btn'])
return dv.get_task_status(dv.get_task_id())
| [
"tan@xx.com"
] | tan@xx.com |
7ee1566109a0e1439a00b673b8094e4310cedd42 | 865bd0c84d06b53a39943dd6d71857e9cfc6d385 | /210-course-schedule-ii/course-schedule-ii.py | db96e80bd54c95cb7537ecfde09c1863953c45f2 | [] | no_license | ANDYsGUITAR/leetcode | 1fd107946f4df50cadb9bd7189b9f7b7128dc9f1 | cbca35396738f1fb750f58424b00b9f10232e574 | refs/heads/master | 2020-04-01T18:24:01.072127 | 2019-04-04T08:38:44 | 2019-04-04T08:38:44 | 153,473,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,198 | py | # There are a total of n courses you have to take, labeled from 0 to n-1.
#
# Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
#
# Given the total number of courses and a list of prerequisite pairs, return the ordering of courses you should take to finish all courses.
#
# There may be multiple correct orders, you just need to return one of them. If it is impossible to finish all courses, return an empty array.
#
# Example 1:
#
#
# Input: 2, [[1,0]]
# Output: [0,1]
# Explanation: There are a total of 2 courses to take. To take course 1 you should have finished
# course 0. So the correct course order is [0,1] .
#
# Example 2:
#
#
# Input: 4, [[1,0],[2,0],[3,1],[3,2]]
# Output: [0,1,2,3] or [0,2,1,3]
# Explanation: There are a total of 4 courses to take. To take course 3 you should have finished both
# courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.
# So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3] .
#
# Note:
#
#
# The input prerequisites is a graph represented by a list of edges, not adjacency matrices. Read more about how a graph is represented.
# You may assume that there are no duplicate edges in the input prerequisites.
#
#
class Solution:
def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
from collections import defaultdict
n = numCourses
adjacent = defaultdict(list)
indegree = {}
for a, b in prerequisites:
adjacent[b].append(a)
indegree[a] = indegree.get(a, 0) + 1
zero_indegree_queue = [k for k in range(n) if k not in indegree]
ans = []
while zero_indegree_queue:
curr = zero_indegree_queue.pop(0)
ans.append(curr)
if curr in adjacent:
for neighbor in adjacent[curr]:
indegree[neighbor] -= 1
if indegree[neighbor] == 0:
zero_indegree_queue.append(neighbor)
return ans if len(ans) == n else []
| [
"andyandwei@163.com"
] | andyandwei@163.com |
5e527ba3ed03cb611093ea4a7ed402f680cbcf51 | d2fdd6b10b0467913971d1408a9a4053f0be9ffb | /datahub/company/migrations/0066_ch_registered_address_country_not_null.py | cf658a1cd0ef5705a3155be10c71d4023ec716e7 | [] | no_license | jakub-kozlowski/data-hub-leeloo | fc5ecebb5e4d885c824fc7c85acad8837fcc5c76 | 7f033fcbcfb2f7c1c0e10bec51620742d3d929df | refs/heads/master | 2020-05-18T13:29:14.145251 | 2019-04-30T12:12:50 | 2019-04-30T12:12:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | # Generated by Django 2.1.5 on 2019-02-12 16:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0065_ch_registered_address_2_not_null'),
]
operations = [
migrations.AlterField(
model_name='companieshousecompany',
name='registered_address_country',
field=models.ForeignKey(default='80756b9a-5d95-e211-a939-e4115bead28a', on_delete=django.db.models.deletion.PROTECT, related_name='companieshousecompanies_with_country_registered_address', to='metadata.Country'),
preserve_default=False,
),
]
| [
"info@marcofucci.com"
] | info@marcofucci.com |
6f67a3c7a74f5f30b7116e81f2094dc0e086e3bc | 9aabce2e3e0f7b8050918639db14e4111de66a40 | /mains/code/main.py | c555d9e7e3671ceab6a3b406ef02c223f79d4398 | [] | no_license | LeGrosLezard/cadju | 3dd8007fa6a05c13a8d736b1929067ea775ab801 | 76d5893cdd099b7da34d551f208433072c479d70 | refs/heads/master | 2020-06-29T00:23:18.428684 | 2019-11-13T17:36:35 | 2019-11-13T17:36:35 | 200,383,501 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | import cv2
import numpy as np
import csv
import os
import scipy
import sklearn
from sklearn.feature_extraction import image
from scipy.io import loadmat
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import numpy as np
from config import model
from config import config
from config import data
liste = os.listdir(r"C:\Users\jeanbaptiste\Desktop\cadju\mains\HAND\images\main_image\annotations")
path = "../images/main_image/annotations/{}"
#les fichiers .mat
for i in liste:
print("")
print(i)
#ouverture fichier .mat
points = scipy.io.loadmat(path.format(str(i)))
coords = [[], []]
#recuperation des pts du .mat
for nb, i in enumerate(points["boxes"][0]):
for j in i[0]:
for k in j:
coords[nb].append(k[0].tolist())
#des fois y'a qu'un seul point qu'on definit par one
one = False
for lst in coords:
if lst == []:
one = True
#si pas one on recup les 2 pts
#sinon on stop
for nb, pts in enumerate(coords):
print(nb)
for p in pts:
print(p)
if one is True:
break
print("")
#faut définir tous les pts en txt.
| [
"noreply@github.com"
] | LeGrosLezard.noreply@github.com |
579b7789e11594bdb660fc41763c770c4547fd2e | 90f729624737cc9700464532a0c67bcbfe718bde | /lino_xl/lib/votes/roles.py | 0576b9a664dd91b9ea3447edc05977681908fbdb | [
"AGPL-3.0-only"
] | permissive | lino-framework/xl | 46ba6dac6e36bb8e700ad07992961097bb04952f | 642b2eba63e272e56743da2d7629be3f32f670aa | refs/heads/master | 2021-05-22T09:59:22.244649 | 2021-04-12T23:45:06 | 2021-04-12T23:45:06 | 52,145,415 | 1 | 5 | BSD-2-Clause | 2021-03-17T11:20:34 | 2016-02-20T09:08:36 | Python | UTF-8 | Python | false | false | 501 | py | # Copyright 2016-2017 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""User roles for this plugin. """
from lino.core.roles import UserRole
class SimpleVotesUser(UserRole):
"""A user who has access to basic contacts functionality.
"""
class VotesUser(SimpleVotesUser):
"""A user who has access to full contacts functionality.
"""
class VotesStaff(VotesUser):
"""A user who can configure contacts functionality.
"""
| [
"luc.saffre@gmail.com"
] | luc.saffre@gmail.com |
6b7b317bf5f3524b3b732fa3d54bbaa6be9c5b9d | 36f5ae42b7e34ddfad452200c45f367889132cbc | /accounts/migrations/0001_initial.py | caa7aef2c131306426dcdd4fde72a1e9fc670815 | [] | no_license | SqpuHkS/eCommerce_project | c17bfac40a63b8bac42e908efa1ee3215e383683 | dc31f25349044c481ffc003d574dab6bce8c222c | refs/heads/master | 2022-12-10T01:24:46.144399 | 2020-09-13T17:36:24 | 2020-09-13T17:36:24 | 260,966,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | # Generated by Django 3.0.5 on 2020-08-07 14:32
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='GuestEmail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('active', models.BooleanField(default=True)),
('update', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
"kovaleksey01@gmail.com"
] | kovaleksey01@gmail.com |
5c5a1bf63e8776a081904cadab852a52397d6fdb | bc441bb06b8948288f110af63feda4e798f30225 | /topology_sdk/model/topology/link_pb2.pyi | b05c2f57e9570634dfd81450d5f5ab5a77a6ad22 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from topology_sdk.model.topology.linkStyle_pb2 import (
LinkStyle as topology_sdk___model___topology___linkStyle_pb2___LinkStyle,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class Link(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
source = ... # type: typing___Text
target = ... # type: typing___Text
@property
def style(self) -> topology_sdk___model___topology___linkStyle_pb2___LinkStyle: ...
def __init__(self,
*,
source : typing___Optional[typing___Text] = None,
target : typing___Optional[typing___Text] = None,
style : typing___Optional[topology_sdk___model___topology___linkStyle_pb2___LinkStyle] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> Link: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Link: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"style",b"style"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"source",b"source",u"style",b"style",u"target",b"target"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
33ea6da52fd684973803ad2dd1e01cc34d4b4f38 | 795b68819d51af14dfabb8dbe40c9e8153029188 | /test.py | e99fd09873fc63ba62a1705f21df709d2b20760f | [] | no_license | MotazBellah/Code-Challenge | 507f1fd3d5b3265e54905979c80d609afd81c54d | c38c95239193e26c1a88f6736d2ab9ee37185964 | refs/heads/master | 2022-02-25T02:54:10.216892 | 2022-02-19T19:28:05 | 2022-02-19T19:28:05 | 193,115,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,255 | py | #!/usr/bin/env python3
import unittest
import itertools
from max_money import get_max_money
def read_input():
"""Read the input file
output: tuble (the info about the restructuring
and info about single machine and the length of restructuring data)"""
# Use with to make sure the file will be closed after the block executed
with open('snapshot_input.txt') as f:
# Split the line at line breaks
x = f.read().splitlines()
# Get the data of restructuring, three positive integers N , C , and D
# Use generator expression for time and space efficiency
restructuring_info = (i.split() for i in x if len(i.split())==3)
# Get the data of single machine, four integers D, P, R and G
machine_info = (i.split() for i in x if len(i.split())!=3)
# Get the length of restructuring data
length = sum(1 for i in x if len(i.split())==3)
return restructuring_info, machine_info, length
def test_inputs():
    """Yield one ``'Case N: <max money>'`` string per restructuring record.

    Reads the parsed file via :func:`read_input` and, for each
    restructuring record, feeds it plus its slice of machine records to
    ``get_max_money``.  Returns a generator, so cases are computed lazily.
    """
    # Get data from the input file and unpack it into three variables
    get_data = read_input()
    restructuring_info = get_data[0]
    machine_info = get_data[1]
    length = get_data[2]
    # start/end delimit the window of machine records belonging to the
    # current restructuring case
    start, end = 0, 0
    # Loop over the number of restructuring cases; x is one restructuring
    # record, y the machine records that belong to it
    for i in range(length):
        x = next(restructuring_info)
        y = []
        # x[0] is presumably the machine count for this case — TODO confirm
        # against the input format.  When machines exist, clone the
        # generator with itertools.tee (so the original survives for the
        # next iteration) and slice the clone to this case's window.
        if int(x[0]):
            end += int(x[0])
            machine_info, m_backup = itertools.tee(machine_info)
            y = itertools.islice(m_backup, start, end)
            start = end
        yield ('Case ' +str(i+1) + ": " + str(get_max_money(x, y)))
if __name__ == '__main__':
    # CLI entry point: evaluate every case lazily and print its result line.
    tests = test_inputs()
    print("========================================")
    print("Test Cases Results Are The Following: ")
    for test in tests:
        print(test)
| [
"engineer.android@yahoo.com"
] | engineer.android@yahoo.com |
cc93075d751b80d89ebf5e8b8af695e4da533b65 | 70af71ad602a8b983aae24f5df4a7040d1f7d7ab | /fabfile/fabhelper/result.py | 1dcdaa7ddea0f77b56050d07801235b432052552 | [] | no_license | suzuki-hoge/fabhelper | afd5c5f1c618dd0eb091b10a90a2b19a98472982 | eb97e3e2e4767105592910a3ccb4d9a5dc288d88 | refs/heads/master | 2021-01-01T03:43:53.658837 | 2015-03-29T06:24:36 | 2015-03-29T06:24:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from fabric.api import hide
from util import execute
from configure import result
def done(command, bold=False):
    """Run *command* and print its output in the configured 'done' colour."""
    __print(result.done, bold, command)
def error(command, bold=False):
    """Run *command* and print its output in the configured 'error' colour."""
    __print(result.error, bold, command)
def already(command, bold=False):
    """Run *command* and print its output in the configured 'already' colour."""
    __print(result.already, bold, command)
def __print(color, bold, command):
    # Execute *command* with all fabric output suppressed, then print the
    # captured output through the given colour function.
    # NOTE: Python 2 print statement — this module is not Python 3 code.
    with hide('everything'):
        print color(execute(command), bold)
| [
"user.ryo@gmail.com"
] | user.ryo@gmail.com |
ef7aea42c5fa7f9c5fbe860ead6ebc311dddf263 | 8066a31f8a8e7abf2bcd077eef5b6346f7c66874 | /test_scripts/GUI_test/GUI_pyqt5graph_class_test.py | ddec36eaa1d4fa2ff3d2ca38143ce7527c0a8711 | [
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | steinnymir/RegAscope2017 | 42831893eba851ad0cc6705445629e405efe1520 | 138e11613c15cea73d73a64daed591c15303fd4d | refs/heads/master | 2021-01-20T01:53:28.278365 | 2018-08-21T14:40:22 | 2018-08-21T14:40:22 | 89,341,133 | 1 | 1 | null | 2017-05-09T10:08:23 | 2017-04-25T09:15:04 | Python | UTF-8 | Python | false | false | 3,461 | py | """
GUI graph plotting test
author: S.Y.Agustsson
"""
from PyQt5 import QtGui, QtCore, QtWidgets # (the example applies equally well to PySide)
import sys
import pyqtgraph as pg
import numpy as np
def rand(n):
    """Build a noisy demo trace of *n* points for the plot widget.

    Returns ``(data, x)``: ``data`` is uniform noise with a step injected
    over the 10%-13% span and a spike at 18%, scaled into the 1e-12
    range; ``x`` is a normalised abscissa from 1.0 up to (2n-1)/n.
    """
    trace = np.random.random(n)
    step_lo, step_hi = int(n*0.1), int(n*0.13)
    spike_at = int(n*0.18)
    trace[step_lo:step_hi] += .5
    trace[spike_at] += 2
    trace[step_lo:step_hi] *= 5
    trace[spike_at] *= 20
    trace *= 1e-12
    return trace, np.arange(n, n + len(trace)) / float(n)
#
#def updateData():
# yd, xd = rand(10000)
# p1.setData(y=yd, x=xd)
class PlottingGUI(QtWidgets.QWidget):
    """Demo window: a button, a text box and a list on the left, one
    pyqtgraph PlotWidget on the right, arranged in a QGridLayout."""
    def __init__(self):
        # Build the widget tree immediately on construction.
        super().__init__()
        self.initUI()
    def initUI(self):
        """Create the child widgets and lay them out in a grid."""
        self.col = QtGui.QColor(0, 0, 0)
        # self.timer = QtCore.QTimer()
        # timer.timeout.connect(updateData)
        # timer.start(50)
        ## Define a top-level widget to hold everything
        #w = QtGui.QWidget()
        ## Create some widgets to be placed inside
        btn = QtGui.QPushButton('press me')
        btn.setCheckable(True)
        text = QtGui.QLineEdit('enter text')
        listw = QtGui.QListWidget()
        pw1 = pg.PlotWidget(name = 'plot1')
        #pw2 = pg.PlotWidget(name = 'plot2')
        ## Create a grid layout to manage the widgets size and position
        layout = QtGui.QGridLayout()
        self.setLayout(layout)
        ## Add widgets to the layout in their proper positions
        layout.addWidget(btn, 0, 0)     # button goes in upper-left
        layout.addWidget(text, 1, 0)    # text edit goes in middle-left
        layout.addWidget(listw, 2, 0)   # list widget goes in bottom-left
        layout.addWidget(pw1, 0, 1, 3, 1)  # plot goes on right side, spanning 3 rows
        #layout.addWidget(pw2, 3, 1, 2, 1)
        #data.updateData()
        #pw1.plotCurve(data)
        #p1 = pw1.plotXY()
    def plotXY(self):
        # NOTE(review): relies on module-level globals x and y, and QWidget
        # has no .plot() method — this method looks dead/broken; confirm
        # before using it.
        self.plot(x,y)
        self.show()
        #self.plot(np.random.normal(size=100), pen=(255,0,0), name="Red curve")
    # def updateData(self):
    #     yd, xd = rand(10000)
    #     self.setData(y=yd, x=xd)
if __name__ == '__main__':
    ## Always start by initializing Qt (only once per application)
    #app = QtGui.QApplication([])
    # Demo data: a sine wave over 0..1000 plus unit Gaussian noise.
    x = np.arange(0,1000,1)
    noise = np.random.normal(0,1,1000)/1
    y = np.sin(x/10)+noise
    # Reuse the running QApplication if one exists (e.g. inside an IDE),
    # otherwise create a fresh one — Qt allows only one per process.
    app = QtCore.QCoreApplication.instance()
    if app is None:
        app = QtGui.QApplication(sys.argv)
    plGUI = PlottingGUI()
    # Enter the Qt event loop and exit the process with its return code.
    sys.exit(app.exec_())
#pw1.plot(np.random.normal(size=100), pen=(255,0,0), name="Red curve")
# ## Define a top-level widget to hold everything
# w = QtGui.QWidget()
#
# ## Create some widgets to be placed inside
# btn = QtGui.QPushButton('press me')
# text = QtGui.QLineEdit('enter text')
# listw = QtGui.QListWidget()
# plot = pg.PlotWidget()
# otherplot = pg.PlotWidget()
#
#
# ## Create a grid layout to manage the widgets size and position
# layout = QtGui.QGridLayout()
# w.setLayout(layout)
#
# ## Add widgets to the layout in their proper positions
# layout.addWidget(btn, 0, 0) # button goes in upper-left
# layout.addWidget(text, 1, 0) # text edit goes in middle-left
# layout.addWidget(listw, 2, 0) # list widget goes in bottom-left
# layout.addWidget(plot, 0, 1, 3, 1) # plot goes on right side, spanning 3 rows
# layout.addWidget(otherplot, 3, 1, 2, 1)
## Display the widget as a new window
#w.show()
## Start the Qt event loop
| [
"sagustss@uni-mainz.de"
] | sagustss@uni-mainz.de |
dbbfad268ab0c217e2d7721ad1784c524bf85e45 | c4c0734c2ea10585e5862de8d8448df3425e8c89 | /interview_collection/crawler/interview_exp_new_coder.py | 34b6f46b012dcde01d8bb69daf579ce8b34624fa | [] | no_license | dalalsunil1986/online-judge-boosting | 19123449ed7b78d40335b3fe9fc13626a5b79b9a | 036bd382e9ace22066f7cad05fe9e7370d0fcf1b | refs/heads/master | 2022-04-15T16:32:58.189990 | 2020-04-15T17:16:57 | 2020-04-15T17:16:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,633 | py | # encoding: utf-8
"""
@author: pkusp
@contact: pkusp@outlook.com
@version: 1.0
@file: interview_exp_new_coder.py
@time: 2019/11/24 6:20 PM
这一行开始写关于本文件的说明与解释
"""
import re
import logging
import os
from bs4 import BeautifulSoup
import bs4
import requests
# Nowcoder discussion-board endpoints (type=2 selects interview experiences).
ms_exp_url = '/discuss/tag/146?type=2&order=0&query=' ## type=2 interview_exp
exp_url = "https://www.nowcoder.com/discuss?type=2&order=0&pageSize=30&expTag=0&page="
root_url = 'https://www.nowcoder.com'
# Browser-like request headers captured from a real session.
# NOTE(review): the cookie below is a hard-coded session snapshot and will
# expire; requests made with stale credentials may be rejected.
headers = {
    ":authority": "www.nowcoder.com",
    ":method": "GET",
    ":path": exp_url,
    ":scheme": "https",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
    "accept-encoding": "gzip, deflate, br",
    "accept-language": "en,zh-CN;q=0.9,zh;q=0.8",
    "cache-control": "max-age=0",
    "cookie": "NOWCODERUID=A480574C365E6F87685EDCCB285F4ED9; NOWCODERCLINETID=6916FFBC7097D08C3158EA483A076A36; Hm_lvt_a808a1326b6c06c437de769d1b85b870=1574588687; Hm_lpvt_a808a1326b6c06c437de769d1b85b870=1574591238; SERVERID=11b18158070cf9d7800d51a2f8a74633|1574591238|1574588683",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "none",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36",
}
def get_url_list():
    """Scrape the first 4 listing pages and collect matching post links.

    Returns a list of ``(title, absolute_url)`` tuples for posts whose
    title contains "面经" or "凉经" ("interview experience") together
    with "微软" ("Microsoft").  Requires network access.
    """
    page = 'https://www.nowcoder.com/discuss/tag/146?type=2&order=0&pageSize=30&expTag=0&query=&page='
    # type: interview_exp
    # pos: topic position
    # page: page number
    # NOTE(review): `exp` is an unused sample URL kept for reference.
    exp = 'https://www.nowcoder.com/discuss/315360?type=2&order=0&pos=5&page=1'
    url_list = []
    for i in range(4):
        base_url = page+str(i)
        page_res = requests.get(base_url)
        # Only parse pages that answered successfully.
        if page_res.status_code == 200:
            soup = BeautifulSoup(page_res.text, "html.parser")
            for tag in soup.find_all("a"):
                title = tag.text
                title_url = tag.attrs["href"]
                # Keep anchors whose title mentions both an interview
                # write-up keyword and Microsoft.
                if (title.find("面经")!=-1 or title.find("凉经")!=-1) and title.find("微软")!=-1:
                    print(title)
                    print(i,'===============')
                    res_url = root_url+title_url
                    url_list.append((title,res_url))
    return url_list
def merge_blank(s):
    """Collapse every run of consecutive spaces in *s* into one space.

    Fixes two defects of the previous hand-rolled loop: the last
    character of the input is no longer dropped (the old loop stopped at
    ``len(s) - 1``), and the dead ``i += 2`` statement — rebinding the
    loop variable has no effect inside a ``for`` loop — is removed.
    """
    # A single C-level pass; only spaces are collapsed (tabs/newlines kept),
    # matching the original's intent.
    return re.sub(' {2,}', ' ', s)
def get_content(url):
    """Fetch one discussion post and return its text, roughly de-HTML'd.

    Downloads *url*, extracts the post body div, strips every literal
    ``<...>`` tag occurrence found in its string form, and collapses
    repeated spaces via :func:`merge_blank`.  Requires network access.
    """
    # url = "https://www.nowcoder.com/discuss/54773?type=2&order=0&pos=68&page=1"
    page_res = requests.get(url)
    soup = BeautifulSoup(page_res.text, "html.parser")
    print('-------')
    # find_all returns a ResultSet; str() turns it into markup text which
    # is then scrubbed of tags with a regex (crude but sufficient here).
    content = soup.find_all(class_="post-topic-des nc-post-content")
    content = str(content)
    del_string = re.findall("<.*?>",content)
    for dels in set(del_string):
        content = content.replace(dels,"")
    return merge_blank(content)
def save_content(contents):
    """Write each entry of *contents* as one line of ``ms_interview.md``.

    Embedded newlines inside an entry are stripped so every entry
    occupies exactly one line.  The file is opened with an explicit
    UTF-8 encoding: the scraped titles/bodies contain Chinese text and
    the platform default encoding (e.g. cp1252 on Windows) would raise
    ``UnicodeEncodeError``.
    """
    with open("ms_interview.md", mode='w', encoding='utf-8') as f:
        for content in contents:
            f.write(content.replace("\n", "") + "\n")
def main():
    """Scrape all matching posts and dump them as a numbered markdown list."""
    ms_interview = [ ]
    url_list = get_url_list()
    i = 0
    for title,url in url_list:
        i+=1
        text = get_content(url)
        # ms_interview.append(("url: ["+url+"] title: ["+title+"] \ncontent: "+text))
        # Markdown entry: "N. [title](url) : content"
        line = "{}. [{}]({}) : {}".format(i,title,url,text)
        ms_interview.append(line)
    print(ms_interview)
    save_content(ms_interview)
if __name__ == "__main__":
    # Script entry point: crawl and write ms_interview.md.
    main()
| [
"pkusp@outlook.com"
] | pkusp@outlook.com |
d03c1870d27e055548f8e560d73dad39e7d7237b | ba995756ff6c856abe98c387bd85ea8cfca66a74 | /medium/question17.py | 49513b1b09fa9ca8363896b41f8de933b8f6953c | [] | no_license | HarshilModi10/MCP_Competition | ae7be388c947ce0a80a84dfe4cda426060d993c5 | 646e0fe39a51a1d48a8a987435307f7cfca7938a | refs/heads/master | 2020-04-24T17:27:31.522701 | 2019-08-04T15:40:12 | 2019-08-04T15:40:12 | 172,147,919 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,416 | py | class Solution(object):
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
map = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
'6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}
return [a + b for a in map[digits[0]] for b in (self.letterCombinations(digits[1:]) or ['']) ] if digits else []
#backtracking
class Solution(object):
    def letterCombinations(self, digits):
        """Return every letter string *digits* can spell on a phone keypad.

        :type digits: str
        :rtype: List[str]
        """
        res = []
        # Standard ITU phone keypad.  The previous table was wrong for the
        # last three keys: 's' belongs to 7 (not 8), 'v' to 8 (not 9) and
        # 'y' was missing entirely from 9.
        number = {"2": ["a", "b", "c"],
                  "3": ["d", "e", "f"],
                  "4": ["g", "h", "i"],
                  "5": ["j", "k", "l"],
                  "6": ["m", "n", "o"],
                  "7": ["p", "q", "r", "s"],
                  "8": ["t", "u", "v"],
                  "9": ["w", "x", "y", "z"] }
        def get_combination(seq, digit):
            # Depth-first: consume one digit per recursion level; emit when
            # all digits have been mapped.
            if not digit:
                res.append(seq)
            else:
                for letter in number[digit[0]]:
                    get_combination(seq+letter,digit[1:])
        if digits:
            get_combination("", digits)
        return res
class Solution(object):
    def letterCombinations(self, digits):
        """Return every letter string *digits* can spell on a phone keypad.

        :type digits: str
        :rtype: List[str]
        """
        if not digits:
            return []
        key_pad = {2: ['a', 'b', 'c'],
                   3: ['d', 'e', 'f'],
                   4: ['g', 'h', 'i'],
                   5: ['j', 'k', 'l'],
                   6: ['m', 'n', 'o'],
                   7: ['p', 'q', 'r', 's'],
                   8: ['t', 'u', 'v'],
                   9: ['w', 'x', 'y', 'z']}
        combinations = []
        self.letter_combo(key_pad, combinations, digits, [])
        return combinations

    def letter_combo(self, key_pad, res, digits, partial):
        """Depth-first helper: *partial* holds one chosen letter per digit."""
        if not digits:
            res.append("".join(partial))
            return
        remaining = digits[1:]
        for letter in key_pad[int(digits[0])]:
            self.letter_combo(key_pad, res, remaining, partial + [letter])
| [
"modih1@mcmaster.ca"
] | modih1@mcmaster.ca |
f7b3bc201a9f991db77d8d84c5b71676c73ffe6d | f58e6240965d2d3148e124dcbdcd617df879bb84 | /tensorflow_datasets/image_classification/plantae_k_test.py | e9b757eeb288bef7229ee3f471c0759f67b3eae8 | [
"Apache-2.0"
] | permissive | suvarnak/datasets | b3f5913cece5c3fe41ec0dde6401a6f37bfd9303 | 3a46548d0c8c83b2256e5abeb483137bd549a4c1 | refs/heads/master | 2022-09-27T03:38:20.430405 | 2022-07-22T15:21:33 | 2022-07-22T15:27:07 | 176,061,377 | 0 | 0 | Apache-2.0 | 2019-03-17T05:45:33 | 2019-03-17T05:45:32 | null | UTF-8 | Python | false | false | 1,475 | py | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for the PlantLeaves dataset."""
from tensorflow_datasets import testing
from tensorflow_datasets.image_classification import plantae_k
class PlantaeKTest(testing.DatasetBuilderTestCase):
  """Fake-data test harness for the PlantaeK image-classification dataset."""
  DATASET_CLASS = plantae_k.PlantaeK
  # See note below about the +1
  SPLITS = {"train": 16 + 1}
  # One diseased (_d) and one healthy (_h) tag per plant species.
  _LABEL_TAGS = [
      "apple_d", "apple_h", "apricot_d", "apricot_h", "cherry_d", "cherry_h",
      "cranberry_d", "cranberry_h", "grapes_d", "grapes_h", "peach_d",
      "peach_h", "pear_d", "pear_h", "walnut_d", "walnut_h", "walnut-h"
  ]
  # NOTE: Must match file names in the test directory. Due to bug in file naming
  # we have to have both walnut_d and walnut_h for healthy walnut.
  # Maps each fake archive name onto itself (identity extraction result).
  DL_EXTRACT_RESULT = {
      fname: fname
      for fname in ["{}1.JPG".format(label_tag) for label_tag in _LABEL_TAGS]
  }
if __name__ == "__main__":
  # Delegate to the TFDS test runner.
  testing.test_main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
7c35c58f2eb9eae4e9ce96a132bcbff67880b63e | 32809f6f425bf5665fc19de2bc929bacc3eeb469 | /src/1049-Last-Stone-Weight-II/1049.py | d728e7a777981aa408cabd7de388682424de0a21 | [] | no_license | luliyucoordinate/Leetcode | 9f6bf01f79aa680e2dff11e73e4d10993467f113 | bcc04d49969654cb44f79218a7ef2fd5c1e5449a | refs/heads/master | 2023-05-25T04:58:45.046772 | 2023-05-24T11:57:20 | 2023-05-24T11:57:20 | 132,753,892 | 1,575 | 569 | null | 2023-05-24T11:57:22 | 2018-05-09T12:30:59 | C++ | UTF-8 | Python | false | false | 389 | py | class Solution:
def lastStoneWeightII(self, stones: List[int]) -> int:
all_sum = sum(stones)
mem = [0]*(all_sum//2+1)
mem[0] = 1
for i in stones:
for j in range(all_sum//2, i-1, -1):
mem[j] |= mem[j - i]
for i in range(all_sum//2, -1, -1):
if mem[i]:
return all_sum - 2*i
return 0 | [
"luliyucoordinate@outlook.com"
] | luliyucoordinate@outlook.com |
286b923345d5064a03a8fa12f9518a40fc2944eb | 8df496a1131913d660515db3fe7372a37fb58023 | /api/users/serializers/plan_subscriptions.py | 30b23f359ca0597e82267d2816efd47c32b720d6 | [] | no_license | alexhernandez-git/freelanium | d99ff35ed3f1a4b3f0ab5cbf3bf94d0414ff33b4 | 4dea2123d92375a223dad63eb79d90084ac5b1af | refs/heads/main | 2023-04-08T15:37:12.663455 | 2021-04-05T12:33:13 | 2021-04-05T12:33:13 | 347,499,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py |
"""Users serializers."""
# Django REST Framework
from rest_framework import serializers
# Django
from django.conf import settings
from django.contrib.auth import password_validation, authenticate
from django.core.validators import RegexValidator
from django.shortcuts import get_object_or_404
# Models
from api.users.models import PlanSubscription, User
class PlanSubscriptionModelSerializer(serializers.ModelSerializer):
    """PlanSubscription model serializer (read-only id)."""
    class Meta:
        """DRF configuration: model, exposed fields, read-only fields."""
        model = PlanSubscription
        # Billing/subscription state exposed to API clients.
        fields = (
            "id",
            "subscription_id",
            "product_id",
            "to_be_cancelled",
            "cancelled",
            "status",
            "payment_issue",
            "current_period_end",
            "plan_type",
            "plan_unit_amount",
            "plan_currency",
            "plan_price_label"
        )
        read_only_fields = ("id",)
| [
"vlexhndz@gmail.com"
] | vlexhndz@gmail.com |
f6d2414e76479c560be6ec01a500aae4be5fd2b0 | 3cd9d29a607d0065cccc2a470f4bf1770b6bff9b | /reset.py | 9d0abe01cf4a257e7206c6869f002b7d228d59db | [] | no_license | Toofifty/the-great-sub | 851898cc1411a9ab78792fc5f44f08a2f5c37c2e | 6fe254191c366fd0e1fc2c41591ccdaf4a0485c3 | refs/heads/master | 2021-01-10T15:18:59.316478 | 2015-05-29T10:24:26 | 2015-05-29T10:24:26 | 36,076,776 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,305 | py | """
Reddit '/u/sub_toppings_bot' v2.0
Subreddit reset button. Clears the entire subreddit
of submissions (posted by the bot).
Super dangerous.
@author: Toofifty
"""
import time, praw
def main():
    """Log in to Reddit and repeatedly delete the bot's own submissions.

    Python 2 script (print statements).  Runs forever: the ``while True``
    loop below never exits, so the final "Finished" print is unreachable.
    """
    print "\nIdentifying with Reddit...",
    # Credentials are stored as two plaintext lines: username, password.
    with open("creds.txt", 'r') as f:
        creds = f.read().strip().split("\n")
    # Identify with Reddit, and log in using the
    # credentials loaded from plaintext.
    reddit = praw.Reddit("Sub Toppings Stealer v2.0 by /u/Toofifty")
    reddit.login(creds[0], creds[1])
    gsub = reddit.get_subreddit("thegreatsub")
    print "Success."
    while True:
        # Only need to use get_top_from_all - it doesn't
        # matter which order we get all the submissions in.
        for subm in gsub.get_top_from_all(limit=100):
            # Titles with non-ASCII chars can make the py2 print raise;
            # swallow that and keep deleting.
            try:
                print "Deleting %s..." % subm.title,
            except:
                pass
            # Only delete posts authored by the bot account itself.
            if str(subm.author) == creds[0]:
                subm.delete()
                print "Done."
            # Don't really need to sleep, but it helps
            # keep things under control.
            # time.sleep(2)
        time.sleep(5)
    print "Finished clearing submissions."
if __name__ == "__main__":
    # Script entry point: starts the (endless) cleanup loop.
    main()
| [
"amatho250@gmail.com"
] | amatho250@gmail.com |
7e23225dbc77f5be0faada6dddeae7e98d592d56 | 9e28200b71d43de1e122a964e88f1b547bfde465 | /question_leetcode/1429_3.py | bc7115a17c8ab44a58629efdf7f6f2e2b9be4680 | [] | no_license | paul0920/leetcode | 6f8a7086eefd3e9bccae83752ef41cbfee1acaea | 474886c5c43a6192db2708e664663542c2e39548 | refs/heads/master | 2023-08-19T14:10:10.494355 | 2021-09-16T20:26:50 | 2021-09-16T20:26:50 | 290,560,326 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,733 | py | class ListNode(object):
    def __init__(self, val):
        # Node of the singly linked list used by FirstUnique:
        # `val` is the stored number, `next` the successor (None = tail).
        self.val = val
        self.next = None
class FirstUnique(object):
    """Queue that reports the first value seen exactly once (LC 1429).

    Invariants: the linked list (dummy -> ... -> tail) holds the unique
    values in insertion order; ``num_to_prev_node[v]`` maps a unique
    value to the node *before* its node (so removal is O(1));
    ``duplicates`` holds every value seen more than once.
    """
    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        self.duplicates = set()
        self.dummy = ListNode(0)
        self.tail = self.dummy
        self.num_to_prev_node = {}
        for num in nums:
            self.add(num)
    def showFirstUnique(self):
        """Return the oldest still-unique value, or -1 if none exists.

        :rtype: int
        """
        if self.dummy.next:
            return self.dummy.next.val
        return -1
    def add(self, value):
        """Insert *value*; demote it to duplicate if already present.

        :type value: int
        :rtype: None
        """
        # Known duplicate: nothing changes.
        if value in self.duplicates:
            return
        # First sighting: append to the uniques list.
        if value not in self.num_to_prev_node:
            self.extend_link_list(value)
            return
        # Second sighting: mark duplicate and unlink its node.
        self.duplicates.add(value)
        self.remove(value)
    def extend_link_list(self, value):
        # Append a new unique node at the tail and record its predecessor.
        self.tail.next = ListNode(value)
        self.num_to_prev_node[value] = self.tail
        self.tail = self.tail.next
    def remove(self, value):
        # Unlink value's node in O(1) via its recorded predecessor.
        prev_node = self.num_to_prev_node[value]
        prev_node.next = prev_node.next.next
        # The successor (if any) now has a new predecessor; otherwise the
        # tail moved back to prev_node.
        if prev_node.next:
            node_value = prev_node.next.val
            # Update hash table
            self.num_to_prev_node[node_value] = prev_node
        else:
            # Update & move self.tail
            self.tail = prev_node
        del self.num_to_prev_node[value]
# Your FirstUnique object will be instantiated and called as such:
# obj = FirstUnique(nums)
# param_1 = obj.showFirstUnique()
# obj.add(value)
| [
"39969716+paul0920@users.noreply.github.com"
] | 39969716+paul0920@users.noreply.github.com |
351465cd5d103b3015853288b8258f66980667a7 | 6fd5d30cf21716893388442eb0f9c16e13b91315 | /ABC/126/c.py | 0dc724c417c71312c44ef93b5be6268dcf6ba8f9 | [] | no_license | mgmk2/atcoder-python | 23d45f3195977f1f5839f6a6315e19cac80da2be | beec5857a8df2957ff7b688f717d4253b4196e10 | refs/heads/master | 2021-06-09T20:00:22.500222 | 2021-05-04T15:36:39 | 2021-05-04T15:36:39 | 179,711,330 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | n, k = map(int, input().split())
# Probability that the doubling game starting from a die face reaches K:
# each face i (chance 1/n) is doubled until it is at least K, and every
# doubling is survived with probability 1/2.  Sum over all faces.
p = 0
for face in range(1, n + 1):
    chance = 1 / n
    value = face
    while value < k:
        value *= 2
        chance *= 0.5
    p += chance
print(p)
| [
"xis.t65@gmail.com"
] | xis.t65@gmail.com |
93c750e5bdabcb3777de20ca4eed00a264d0e2fe | 9848584d5f1858692fb4cdbe793bc91ed3be920e | /coding/00140-word-break-2/solution.py | 75105c1a14a80febf2af16e768ebd2d7e030105c | [] | no_license | misaka-10032/leetcode | 1212223585cc27d3dfc6d2ca6a27770f06e427e3 | 20580185c6f72f3c09a725168af48893156161f5 | refs/heads/master | 2020-12-12T09:45:31.491801 | 2020-09-14T00:18:19 | 2020-09-14T00:18:19 | 50,267,669 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | #!/usr/bin/env python3
# encoding: utf-8
from typing import List
class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:
        """Return every sentence obtainable by segmenting *s* into
        dictionary words, with words joined by single spaces."""
        if not s:
            return []
        # memo[start] -> all sentences covering s[start:], filled lazily.
        memo = {}

        def sentences_from(start: int) -> List[str]:
            if start in memo:
                return memo[start]
            found = []
            for word in wordDict:
                if not s.startswith(word, start):
                    continue
                end = start + len(word)
                if end == len(s):
                    found.append(word)
                else:
                    for tail in sentences_from(end):
                        found.append(word + ' ' + tail)
            memo[start] = found
            return found

        return sentences_from(0)
| [
"longqicai@gmail.com"
] | longqicai@gmail.com |
9ea7ed67d3760852f9f7767573808a2a70feaed6 | df2cbe914f463ad050d7ed26194424afbe3a0a52 | /addons/website_slides/tests/test_ui_wslides.py | 7338295a1db6623b8786633e158556134d748251 | [
"Apache-2.0"
] | permissive | SHIVJITH/Odoo_Machine_Test | 019ed339e995be980606a2d87a63312ddc18e706 | 310497a9872db7844b521e6dab5f7a9f61d365a4 | refs/heads/main | 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 | Apache-2.0 | 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null | UTF-8 | Python | false | false | 7,486 | py | # Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
from dateutil.relativedelta import relativedelta
from odoo import tests
from odoo.fields import Datetime
from odoo.modules.module import get_module_resource
from odoo.addons.base.tests.common import HttpCaseWithUserDemo, HttpCaseWithUserPortal
class TestUICommon(HttpCaseWithUserDemo, HttpCaseWithUserPortal):
    """Shared fixture: publishes a demo 'Basics of Gardening' training
    course (five slides, including a two-question quiz) so the browser
    tours in the subclasses have content to run against."""
    def setUp(self):
        super(TestUICommon, self).setUp()
        # Load pdf and img contents
        pdf_path = get_module_resource('website_slides', 'static', 'src', 'img', 'presentation.pdf')
        pdf_content = base64.b64encode(open(pdf_path, "rb").read())
        img_path = get_module_resource('website_slides', 'static', 'src', 'img', 'slide_demo_gardening_1.jpg')
        img_content = base64.b64encode(open(img_path, "rb").read())
        # Published public training channel; (0, 0, {...}) tuples are
        # Odoo one2many "create" commands for the slides and questions.
        self.env['slide.channel'].create({
            'name': 'Basics of Gardening - Test',
            'user_id': self.env.ref('base.user_admin').id,
            'enroll': 'public',
            'channel_type': 'training',
            'allow_comment': True,
            'promote_strategy': 'most_voted',
            'is_published': True,
            'description': 'Learn the basics of gardening !',
            'create_date': Datetime.now() - relativedelta(days=8),
            'slide_ids': [
                (0, 0, {
                    'name': 'Gardening: The Know-How',
                    'sequence': 1,
                    'datas': pdf_content,
                    'slide_type': 'presentation',
                    'is_published': True,
                    'is_preview': True,
                }), (0, 0, {
                    'name': 'Home Gardening',
                    'sequence': 2,
                    'image_1920': img_content,
                    'slide_type': 'infographic',
                    'is_published': True,
                }), (0, 0, {
                    'name': 'Mighty Carrots',
                    'sequence': 3,
                    'image_1920': img_content,
                    'slide_type': 'infographic',
                    'is_published': True,
                }), (0, 0, {
                    'name': 'How to Grow and Harvest The Best Strawberries | Basics',
                    'sequence': 4,
                    'datas': pdf_content,
                    'slide_type': 'document',
                    'is_published': True,
                }), (0, 0, {
                    'name': 'Test your knowledge',
                    'sequence': 5,
                    'slide_type': 'quiz',
                    'is_published': True,
                    'question_ids': [
                        (0, 0, {
                            'question': 'What is a strawberry ?',
                            'answer_ids': [
                                (0, 0, {
                                    'text_value': 'A fruit',
                                    'is_correct': True,
                                    'sequence': 1,
                                }), (0, 0, {
                                    'text_value': 'A vegetable',
                                    'sequence': 2,
                                }), (0, 0, {
                                    'text_value': 'A table',
                                    'sequence': 3,
                                })
                            ]
                        }), (0, 0, {
                            'question': 'What is the best tool to dig a hole for your plants ?',
                            'answer_ids': [
                                (0, 0, {
                                    'text_value': 'A shovel',
                                    'is_correct': True,
                                    'sequence': 1,
                                }), (0, 0, {
                                    'text_value': 'A spoon',
                                    'sequence': 2,
                                })
                            ]
                        })
                    ]
                })
            ]
        })
@tests.common.tagged('post_install', '-at_install')
class TestUi(TestUICommon):
    """Runs the 'course_member' and full-screen-editor web tours with
    different access levels (employee, eLearning officer, portal,
    website publisher)."""
    def test_course_member_employee(self):
        # Plain internal user: reset groups to base.group_user only.
        user_demo = self.user_demo
        user_demo.flush()
        user_demo.write({
            'groups_id': [(5, 0), (4, self.env.ref('base.group_user').id)]
        })
        self.browser_js(
            '/slides',
            'odoo.__DEBUG__.services["web_tour.tour"].run("course_member")',
            'odoo.__DEBUG__.services["web_tour.tour"].tours.course_member.ready',
            login=user_demo.login)
    def test_course_member_elearning_officer(self):
        # Employee promoted to eLearning officer.
        user_demo = self.user_demo
        user_demo.flush()
        user_demo.write({
            'groups_id': [(5, 0), (4, self.env.ref('base.group_user').id), (4, self.env.ref('website_slides.group_website_slides_officer').id)]
        })
        self.browser_js(
            '/slides',
            'odoo.__DEBUG__.services["web_tour.tour"].run("course_member")',
            'odoo.__DEBUG__.services["web_tour.tour"].tours.course_member.ready',
            login=user_demo.login)
    def test_course_member_portal(self):
        # External portal user keeps its default groups.
        user_portal = self.user_portal
        user_portal.flush()
        self.browser_js(
            '/slides',
            'odoo.__DEBUG__.services["web_tour.tour"].run("course_member")',
            'odoo.__DEBUG__.services["web_tour.tour"].tours.course_member.ready',
            login=user_portal.login)
    def test_full_screen_edition_website_publisher(self):
        # group_website_designer
        user_demo = self.env.ref('base.user_demo')
        user_demo.flush()
        user_demo.write({
            'groups_id': [(5, 0), (4, self.env.ref('base.group_user').id), (4, self.env.ref('website.group_website_publisher').id)]
        })
        self.browser_js(
            '/slides',
            'odoo.__DEBUG__.services["web_tour.tour"].run("full_screen_web_editor")',
            'odoo.__DEBUG__.services["web_tour.tour"].tours.full_screen_web_editor.ready',
            login=user_demo.login)
@tests.common.tagged('external', 'post_install', '-standard', '-at_install')
class TestUiYoutube(HttpCaseWithUserDemo):
    """Tours that embed YouTube content — tagged 'external' because they
    need network access, so they are excluded from standard runs."""
    def test_course_member_yt_employee(self):
        # remove membership because we need to be able to join the course during the tour
        user_demo = self.user_demo
        user_demo.flush()
        user_demo.write({
            'groups_id': [(5, 0), (4, self.env.ref('base.group_user').id)]
        })
        self.env.ref('website_slides.slide_channel_demo_3_furn0')._remove_membership(self.env.ref('base.partner_demo').ids)
        self.browser_js(
            '/slides',
            'odoo.__DEBUG__.services["web_tour.tour"].run("course_member_youtube")',
            'odoo.__DEBUG__.services["web_tour.tour"].tours.course_member_youtube.ready',
            login=user_demo.login)
    def test_course_publisher_elearning_manager(self):
        # eLearning manager creates/publishes a course in the tour.
        user_demo = self.user_demo
        user_demo.flush()
        user_demo.write({
            'groups_id': [(5, 0), (4, self.env.ref('base.group_user').id), (4, self.env.ref('website_slides.group_website_slides_manager').id)]
        })
        self.browser_js(
            '/slides',
            'odoo.__DEBUG__.services["web_tour.tour"].run("course_publisher")',
            'odoo.__DEBUG__.services["web_tour.tour"].tours.course_publisher.ready',
            login=user_demo.login)
| [
"36736117+SHIVJITH@users.noreply.github.com"
] | 36736117+SHIVJITH@users.noreply.github.com |
1097b0a0571d6e2b69a5b26de3ac21eaf5ecd112 | 95badd05be1f7e995d9108d7eaf671dad33f903c | /home/migrations/0002_customtext_homepage.py | 1afe808fb46df255a5057e30f1f4ede6e17e3776 | [] | no_license | crowdbotics-apps/test-27786 | c65ca32485fd6107030154e7587e1b14487497e0 | caa7fcd2aa40503cb815d3904c387fcaf54ce970 | refs/heads/master | 2023-05-14T19:16:06.918352 | 2021-06-07T02:24:55 | 2021-06-07T02:24:55 | 374,510,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | # Generated by Django 2.2.20 on 2021-06-07 02:24
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the CustomText and HomePage tables.
    initial = True
    dependencies = [
        ('home', '0001_load_initial_data'),
    ]
    operations = [
        migrations.CreateModel(
            name='CustomText',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=150)),
            ],
        ),
        migrations.CreateModel(
            name='HomePage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField()),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4528ae758d5b4d7defd06d9e176db0f4723381a0 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/avs/get_datastore.py | bd68caf5cf28a5844cf9215fd3e4d7a42d123eba | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,816 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetDatastoreResult',
'AwaitableGetDatastoreResult',
'get_datastore',
]
@pulumi.output_type
class GetDatastoreResult:
    """
    A datastore resource (result payload of :func:`get_datastore`).
    Generated by the Pulumi SDK Generator — do not hand-edit logic.
    """
    def __init__(__self__, disk_pool_volume=None, id=None, name=None, net_app_volume=None, provisioning_state=None, type=None):
        # Generated guards: validate the raw invoke payload field by field.
        if disk_pool_volume and not isinstance(disk_pool_volume, dict):
            raise TypeError("Expected argument 'disk_pool_volume' to be a dict")
        pulumi.set(__self__, "disk_pool_volume", disk_pool_volume)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if net_app_volume and not isinstance(net_app_volume, dict):
            raise TypeError("Expected argument 'net_app_volume' to be a dict")
        pulumi.set(__self__, "net_app_volume", net_app_volume)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="diskPoolVolume")
    def disk_pool_volume(self) -> Optional['outputs.DiskPoolVolumeResponse']:
        """
        An iSCSI volume
        """
        return pulumi.get(self, "disk_pool_volume")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="netAppVolume")
    def net_app_volume(self) -> Optional['outputs.NetAppVolumeResponse']:
        """
        An Azure NetApp Files volume
        """
        return pulumi.get(self, "net_app_volume")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The state of the datastore provisioning
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetDatastoreResult(GetDatastoreResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `if False: yield` turns __await__ into a generator, so
        # the already-resolved result can be `await`ed without suspending.
        if False:
            yield self
        return GetDatastoreResult(
            disk_pool_volume=self.disk_pool_volume,
            id=self.id,
            name=self.name,
            net_app_volume=self.net_app_volume,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_datastore(cluster_name: Optional[str] = None,
                  datastore_name: Optional[str] = None,
                  private_cloud_name: Optional[str] = None,
                  resource_group_name: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatastoreResult:
    """
    A datastore resource
    API Version: 2021-01-01-preview.


    :param str cluster_name: Name of the cluster in the private cloud
    :param str datastore_name: Name of the datastore in the private cloud cluster
    :param str private_cloud_name: Name of the private cloud
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param opts: Options controlling the provider invoke (version etc.).
    :return: An awaitable result describing the datastore.
    """
    # Marshal arguments into the camelCase wire format the provider expects.
    __args__ = dict()
    __args__['clusterName'] = cluster_name
    __args__['datastoreName'] = datastore_name
    __args__['privateCloudName'] = private_cloud_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; `.value` holds the typed result object.
    __ret__ = pulumi.runtime.invoke('azure-native:avs:getDatastore', __args__, opts=opts, typ=GetDatastoreResult).value

    return AwaitableGetDatastoreResult(
        disk_pool_volume=__ret__.disk_pool_volume,
        id=__ret__.id,
        name=__ret__.name,
        net_app_volume=__ret__.net_app_volume,
        provisioning_state=__ret__.provisioning_state,
        type=__ret__.type)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
d85653339ed548579ccc3ca4c68fbf947769f96d | 4e30d990963870478ed248567e432795f519e1cc | /tests/models/validators/v3_1_1/jsd_c37778a2faa5552894cc60cec13c56c7.py | 0e77d342731ea99cb71a16f2b95d3224ccf266d2 | [
"MIT"
] | permissive | CiscoISE/ciscoisesdk | 84074a57bf1042a735e3fc6eb7876555150d2b51 | f468c54998ec1ad85435ea28988922f0573bfee8 | refs/heads/main | 2023-09-04T23:56:32.232035 | 2023-08-25T17:31:49 | 2023-08-25T17:31:49 | 365,359,531 | 48 | 9 | MIT | 2023-08-25T17:31:51 | 2021-05-07T21:43:52 | Python | UTF-8 | Python | false | false | 3,817 | py | # -*- coding: utf-8 -*-
"""Identity Services Engine getGuestSsid data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from builtins import *
import fastjsonschema
from ciscoisesdk.exceptions import MalformedRequest
class JSONSchemaValidatorC37778A2Faa5552894Cc60Cec13C56C7(object):
    """getGuestSsid request schema definition."""
    def __init__(self):
        super(JSONSchemaValidatorC37778A2Faa5552894Cc60Cec13C56C7, self).__init__()
        # Compile the JSON schema once; the literal is indented 16 spaces
        # for readability and the trailing .replace() strips exactly that
        # indentation before json.loads() parses it.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "$schema": "http://json-schema.org/draft-04/schema#",
                "properties": {
                  "SearchResult": {
                    "properties": {
                      "nextPage": {
                        "properties": {
                          "href": {
                            "type": "string"
                          },
                          "rel": {
                            "type": "string"
                          },
                          "type": {
                            "type": "string"
                          }
                        },
                        "type": "object"
                      },
                      "previousPage": {
                        "properties": {
                          "href": {
                            "type": "string"
                          },
                          "rel": {
                            "type": "string"
                          },
                          "type": {
                            "type": "string"
                          }
                        },
                        "type": "object"
                      },
                      "resources": {
                        "items": {
                          "properties": {
                            "id": {
                              "type": "string"
                            },
                            "link": {
                              "properties": {
                                "href": {
                                  "type": "string"
                                },
                                "rel": {
                                  "type": "string"
                                },
                                "type": {
                                  "type": "string"
                                }
                              },
                              "type": "object"
                            },
                            "name": {
                              "type": "string"
                            }
                          },
                          "type": "object"
                        },
                        "type": "array"
                      },
                      "total": {
                        "type": "integer"
                      }
                    },
                    "type": "object"
                  }
                },
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))

    def validate(self, request):
        """Validate *request* against the getGuestSsid schema.

        Raises MalformedRequest (wrapping the fastjsonschema reason) when
        the request does not conform.
        """
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| [
"wastorga@altus.co.cr"
] | wastorga@altus.co.cr |
268ed2fbcce7f04d15de4cf001dcfde0ec0d8237 | 41c64b0495902c111ab627b71a6a5e54a8e12bcf | /category/TensorFlow/scope_diff.py | ccb5d4d894f1af6c833d93ea6f7eab3f854c7b09 | [] | no_license | ZhangRui111/Rui_utils | 80c335e3fcfa8e7e88accf7af6079ed7b7b30a25 | 5e1a303cf2dab26f2ec092d0953f09354a787079 | refs/heads/master | 2021-10-08T16:03:13.636857 | 2021-10-08T05:01:20 | 2021-10-08T05:01:20 | 160,489,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py | """
It is for the purpose of the variable sharing mechanism that a separate type
of scope (variable scope) was introduced.
As a result, we end up having two different types of scopes:
- name scope, created using tf.name_scope
- variable scope, created using tf.variable_scope
Both scopes have the same effect on all operations as well as variables created
using tf.Variable, i.e., the scope will be added as a prefix to the operation or
variable name.
However, name scope is ignored by tf.get_variable.
"""
import tensorflow as tf
# Demo 1: tf.get_variable ignores name scopes but honors variable scopes,
# while tf.Variable picks up both.
with tf.name_scope("foo"):
    with tf.variable_scope("var_scope"):
        with tf.name_scope("foo2"):
            a = tf.get_variable(name="a", shape=[1])
            b = tf.Variable(1, name="b", dtype=tf.float32)

# Demo 2: reuse=True makes tf.get_variable return the SAME variable object,
# regardless of the surrounding (different) name scopes.
with tf.name_scope("bar"):
    with tf.variable_scope("var_scope", reuse=True):
        with tf.name_scope("bar2"):
            a1 = tf.get_variable("a", shape=[1])
            b1 = tf.Variable(1, name="b", dtype=tf.float32)

print(a.name)   # var_scope/a:0  (name scopes "foo"/"foo2" were ignored)
print(a1.name)  # var_scope/a:0  (identical: same shared variable)
print(id(a.name))
print(id(a1.name))
assert a == a1
assert id(a) == id(a1)
print(b.name)   # foo/var_scope/foo2/b:0  (tf.Variable keeps name scopes)
print(b1.name)  # bar/var_scope/bar2/b:0  (distinct variable, never shared)
print(id(b))
print(id(b1))
try:
    assert id(b) == id(b1)
except AssertionError:
    print("id(b) != id(b1)")

# Re-entering the scope WITHOUT reuse raises: the variable already exists.
try:
    with tf.name_scope("bar"):
        with tf.variable_scope("var_scope"):
            with tf.name_scope("foo2"):
                a = tf.get_variable("a", shape=[1])
except ValueError as err:
    print(err)

# Reusing with a mismatched shape also raises ValueError.
try:
    with tf.name_scope("bar"):
        with tf.variable_scope("var_scope", reuse=True):
            with tf.name_scope("foo2"):
                a2 = tf.get_variable("a", shape=[2])
except ValueError as err:
    print(err)
| [
"zhangruisg111@163.com"
] | zhangruisg111@163.com |
f63b3f17855c6ebe013cc3f02795411f3145f368 | 612325535126eaddebc230d8c27af095c8e5cc2f | /src/build/linux/sysroot_scripts/build_and_upload.py | 8acee0223a23e771d9d50ff16b587e00632765b0 | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/proto-quic_1V94 | 1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673 | feee14d96ee95313f236e0f0e3ff7719246c84f7 | refs/heads/master | 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,327 | py | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Automates running BuildPackageLists, BuildSysroot, and
UploadSysroot for each supported arch of each sysroot creator.
"""
import glob
import hashlib
import json
import multiprocessing
import os
import re
import string
import subprocess
import sys
def run_script(args):
    """Run *args* as a subprocess, discarding its stdout and stderr.

    Raises subprocess.CalledProcessError if the command exits non-zero.
    """
    # Use a context manager so the devnull handle is always closed; the
    # original leaked one file descriptor per call.  open(os.devnull) is
    # used (rather than subprocess.DEVNULL) to stay Python 2 compatible,
    # matching the rest of this script.
    with open(os.devnull, 'w') as fnull:
        subprocess.check_call(args, stdout=fnull, stderr=fnull)
def sha1sumfile(filename):
    """Return the hex SHA-1 digest of the file at *filename*.

    The file is read in 64 KiB chunks so arbitrarily large tarballs can be
    hashed without loading them into memory.
    """
    digest = hashlib.sha1()
    with open(filename, 'rb') as fobj:
        # iter() with a sentinel keeps calling read() until it returns b''.
        for chunk in iter(lambda: fobj.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def get_proc_output(args):
    """Run *args* and return its captured stdout with surrounding
    whitespace (including the trailing newline) stripped."""
    raw_output = subprocess.check_output(args)
    return raw_output.strip()
def build_and_upload(script_path, distro, release, arch, lock):
    """Build and upload one sysroot, then record it in sysroots.json.

    Runs the sysroot-creator script's UpdatePackageLists / BuildSysroot /
    UploadSysroot steps for *arch*, hashes the resulting tarball, and
    rewrites the matching entry in sysroots.json while holding *lock*
    (the file is shared by all builder processes).
    """
    # TODO(thomasanderson): Find out which revision 'git-cl upload' uses to
    # calculate the diff against and use that instead of HEAD.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    revision = get_proc_output(['git', '-C', script_dir, 'rev-parse', 'HEAD'])
    run_script([script_path, 'UpdatePackageLists%s' % arch])
    run_script([script_path, 'BuildSysroot%s' % arch])
    run_script([script_path, 'UploadSysroot%s' % arch, revision])

    tarball = '%s_%s_%s_sysroot.tgz' % (distro, release, arch.lower())
    tgz_path = os.path.join(script_dir, "..", "..", "..", "out", "sysroot-build",
                            release, tarball)
    sha1sum = sha1sumfile(tgz_path)
    sysroot_dir = '%s_%s_%s-sysroot' % (distro, release, arch.lower())

    sysroot_metadata = {
        'Revision': revision,
        'Tarball': tarball,
        'Sha1Sum': sha1sum,
        'SysrootDir': sysroot_dir
    }
    with lock:
        # Mode 'r+' (read/update).  The original used 'rw+', which is not a
        # valid mode on Python 3 and only worked on Python 2 because the
        # underlying C fopen() ignored the stray 'w'.
        with open(os.path.join(script_dir, 'sysroots.json'), 'r+') as f:
            sysroots = json.load(f)
            sysroots["%s_%s" % (release, arch.lower())] = sysroot_metadata
            # Rewrite the whole file in place with stable key order.
            f.seek(0)
            f.truncate()
            f.write(json.dumps(sysroots, sort_keys=True, indent=4,
                               separators=(',', ': ')))
            f.write('\n')
def main():
    # Launch one builder process per (distro, release, arch) combination
    # advertised by each sysroot-creator-*.sh script; a shared lock
    # serialises the concurrent updates to sysroots.json.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    procs = []
    lock = multiprocessing.Lock()
    for filename in glob.glob(os.path.join(script_dir, 'sysroot-creator-*.sh')):
        script_path = os.path.join(script_dir, filename)
        distro = get_proc_output([script_path, 'PrintDistro'])
        release = get_proc_output([script_path, 'PrintRelease'])
        architectures = get_proc_output([script_path, 'PrintArchitectures'])
        for arch in architectures.split('\n'):
            proc = multiprocessing.Process(target=build_and_upload,
                                           args=(script_path, distro, release, arch,
                                                 lock))
            procs.append(("%s %s (%s)" % (distro, release, arch), proc))
            proc.start()
    # Wait for every build before printing the summary.
    for _, proc in procs:
        proc.join()

    print "SYSROOT CREATION SUMMARY"
    failures = 0
    for name, proc in procs:
        if proc.exitcode:
            failures += 1
        status = "FAILURE" if proc.exitcode else "SUCCESS"
        print "%s sysroot creation\t%s" % (name, status)
    # Returned as the process exit code: number of failed builds.
    return failures
if __name__ == '__main__':
    # Exit status is the number of failed sysroot builds (0 on success).
    sys.exit(main())
| [
"2100639007@qq.com"
] | 2100639007@qq.com |
17f16c6b0242cfa8b0c0dee505972f84dcf42559 | 9d5ae8cc5f53f5aee7247be69142d9118769d395 | /508. Most Frequent Subtree Sum.py | 9e55bad2a74e8c0daa5b1a1c6df059b01657157c | [] | no_license | BITMystery/leetcode-journey | d4c93319bb555a7e47e62b8b974a2f77578bc760 | 616939d1599b5a135747b0c4dd1f989974835f40 | refs/heads/master | 2020-05-24T08:15:30.207996 | 2017-10-21T06:33:17 | 2017-10-21T06:33:17 | 84,839,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import Counter
class Solution(object):
    """LeetCode 508: return the subtree sum value(s) that occur most often
    in the tree (all ties returned, in arbitrary order)."""

    def helper(self, root, sums):
        """Post-order walk: append the sum of every subtree rooted at or
        below *root* to *sums* and return the sum rooted at *root*.

        An empty subtree contributes 0 and appends nothing.
        """
        # `root is None` instead of the original `None == root` anti-idiom.
        if root is None:
            return 0
        subtotal = root.val + self.helper(root.left, sums) + self.helper(root.right, sums)
        sums.append(subtotal)
        return subtotal

    def findFrequentTreeSum(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]
        """
        if root is None:
            return []
        sums = []
        self.helper(root, sums)
        counts = Counter(sums)
        # max over the counts directly; no need to materialise items().
        max_freq = max(counts.values())
        return [value for value, freq in counts.items() if freq == max_freq]
| [
"noreply@github.com"
] | BITMystery.noreply@github.com |
f01985c1ec6968aaf7c366012fe7663cc0dcf5ca | 314245750f897949bc7867883d22b8ff1465fbe1 | /implement/wallInspection.py | c031862aaae80371d8d7ecb0c1912b7a1b7118f6 | [] | no_license | dongho108/CodingTestByPython | e608d70235cc6c6a27c71eea86ee28d1271d4d1d | 475b3665377a8f74944d7698e894ad3eafc49ad4 | refs/heads/master | 2023-05-24T15:01:56.563359 | 2021-07-01T14:23:20 | 2021-07-01T14:23:20 | 330,833,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | from itertools import permutations
def solution(n, weak, dist):
    """KAKAO "wall inspection": minimum number of friends needed to check
    every weak point of a circular wall of circumference *n*.

    Args:
        n: circumference of the circular wall.
        weak: clockwise positions of the weak points.
        dist: distance each available friend can walk.

    Returns:
        The minimum number of friends required, or -1 if even all of them
        cannot cover every weak point.

    Unlike the original, the caller's *weak* list is NOT mutated.
    """
    length = len(weak)
    # Unroll the circle: append each weak point shifted by n so a clockwise
    # run that wraps around becomes a plain linear interval.  Work on a
    # copy so the input list is left untouched (the original appended to
    # the caller's list as a side effect).
    points = weak + [n + w for w in weak]
    best = len(dist) + 1  # sentinel: one more than the number of friends

    for start in range(length):  # try every weak point as the starting spot
        for friends in permutations(dist, len(dist)):  # every dispatch order
            count = 1
            # Farthest position the friends deployed so far can reach.
            position = points[start] + friends[count - 1]
            for index in range(start, start + length):
                if position < points[index]:  # current friend can't reach it
                    count += 1  # deploy one more friend
                    if count > len(dist):  # ran out of friends
                        break
                    # The new friend starts at this weak point and extends
                    # coverage by their own walking distance.
                    position = points[index] + friends[count - 1]
            best = min(best, count)

    # Unchanged sentinel means no assignment covered everything.
    return -1 if best > len(dist) else best
# Sample runs from the problem statement; expected output: 2 then 1.
print(solution(12, [1,5,6,10], [1,2,3,4]))
print(solution(12, [1,3,4,9,10], [3,5,7]))
"dongho108@naver.com"
] | dongho108@naver.com |
bdb17c8a2d12915f16193ddd1093df7f98bab53b | 4f801ddc0d7df23c2de9fcb887a4292517d9467b | /room/settings.py | cf582f25f21ab6d9121ebc01f3c43cba0db8f1b3 | [] | no_license | maxhasan882/create_game_room | c3ca78d070f1194510226358b010bf40f80ede2e | 80363eb645c31e7f52a5bbe678d71c0a90700214 | refs/heads/master | 2021-02-24T02:05:37.474817 | 2020-03-13T10:35:04 | 2020-03-13T10:35:04 | 245,417,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,468 | py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import queue
from datetime import timedelta
# Project base directory (two levels above this settings file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 's%3mtr9z$_^k1d-9v%28vnrbz67!ek!cqli_rfq#*pf=j48ld3'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Module-level mutable state shared across the whole process; presumably
# used by the game-room matching code -- TODO confirm against callers.
CHECK = True
st = set()
st1 = set()

# '*' accepts any Host header -- acceptable for development only.
ALLOWED_HOSTS = ['*']


# Application definition

INSTALLED_APPS = [
    'corsheaders',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'channels',
    'match',
    'user'
]

MIDDLEWARE = [
    # CorsMiddleware must come before CommonMiddleware to add CORS headers.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'room.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'room.wsgi.application'
# Channels entry point for WebSocket/async handling.
ASGI_APPLICATION = 'room.routing.application'

# Channels layer backed by a local Redis instance.
CHANNEL_LAYERS = {
    'default': {
        'BACKEND': 'channels_redis.core.RedisChannelLayer',
        'CONFIG': {
            "hosts": [('127.0.0.1', 6379)],
        },
    },
}


# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'

STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]

# SECURITY NOTE: allows cross-origin requests from anywhere (dev only).
CORS_ORIGIN_ALLOW_ALL = True

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework_simplejwt.authentication.JWTAuthentication',
    ],
}

# djangorestframework-simplejwt configuration.
# NOTE(review): a 600-minute access token is unusually long-lived.
SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(minutes=600),
    'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
    'ROTATE_REFRESH_TOKENS': False,
    'BLACKLIST_AFTER_ROTATION': True,

    'ALGORITHM': 'HS256',
    'SIGNING_KEY': SECRET_KEY,
    'VERIFYING_KEY': None,

    'AUTH_HEADER_TYPES': ('Bearer',),
    'USER_ID_FIELD': 'id',
    'USER_ID_CLAIM': 'user_id',

    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
    'TOKEN_TYPE_CLAIM': 'token_type',

    'JTI_CLAIM': 'jti',

    'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
    'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),
    'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
}

# Celery: broker and result backend on the same local Redis.
BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Dhaka'

# External API gateway endpoint consumed by the app.
AWS_BASE_URL = 'https://myjaxrdi39.execute-api.us-east-1.amazonaws.com/dev/'
| [
"rhmithu50@gmail.com"
] | rhmithu50@gmail.com |
8130b6dc8cd35a0f986b99d18b20afe35a90a61e | 1fd0991e163e3cbb2286a5ea30d8143a63107b8d | /fst_defs.py | 806543e8ed82a5f619fb932728d1e6e50293a0ea | [] | no_license | mnunberg/intellibak | 37d111566e015b3f143738a42e95796536d1aba0 | 79a0ce8eb76282a2079978d67ead8eb93357ae47 | refs/heads/master | 2020-05-20T23:54:10.204337 | 2011-09-15T07:43:52 | 2011-09-15T07:43:52 | 2,391,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | #!/usr/bin/env python2.6
import fscopy
import lvmnested
from fs import FSType
fst_list = []
# Build one FSType per supported filesystem family.  enumerate() supplies
# the numeric id (nval); each tuple is (name, match-text regex, backup class).
for nval, obj in enumerate(
        (("Linux", "ext[234]|jfs|reiser|xfs|jfs", fscopy.TarArchive),
         ("UFS_BSD", "unix fast file system", fscopy.UFSDump),
         ("NTFS", None, fscopy.NTFSClone),
         ("LVM", "lvm|lvm2", lvmnested.LVMNestedBackup),
         ("SWAP", "swap", fscopy.SwapInfo))
        ):
    # NOTE(review): "jfs" appears twice in the Linux pattern (harmless for
    # matching, but probably a typo for another filesystem name).
    name, mtxt, bu_method = obj
    __fst = FSType(name, nval, mtxt, backupmethod=bu_method)
    print __fst.name, "initialized"
    fst_list.append(__fst)

# Fallback type used when nothing matches: raw image copy, sentinel id -1.
defaultFST = FSType("COPY", -1, "", backupmethod=fscopy.ImageDump)
"mnunberg@haskalah.org"
] | mnunberg@haskalah.org |
f41a1c4f606667b441de60c227adfb91c2887858 | fdd836226529a1d0098a8dd9c2014a53e645bcc9 | /rocksdb/quizup/analysis/latency/mutant-latency-local-ssd-ebs-mag-by-sstable-otts/Conf.py | de5d933d83550e9766834e2d50b3fb3c6b1ac999 | [] | no_license | hobinyoon/mutant-misc | 41520b952964421c016d973dbd6cd29356da1f25 | b6ea031b00cda723dbd20a79884ec460c8e7c5ff | refs/heads/master | 2021-01-02T23:02:51.782760 | 2018-03-09T18:03:25 | 2018-03-09T18:03:25 | 99,450,632 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | import os
import yaml
class Manifest:
    """Lazy, process-wide cached accessor for the manifest.yaml config."""

    _doc = None    # parsed YAML document, populated on first access
    _init = False  # True once the file has been loaded

    @staticmethod
    def _Init():
        # Load the manifest at most once per process.
        if Manifest._init:
            return
        fn_manifest = "%s/work/mutant/misc/rocksdb/log/manifest.yaml" % os.path.expanduser("~")
        with open(fn_manifest, "r") as f:
            # NOTE(review): yaml.load() without an explicit Loader can run
            # arbitrary constructors; switch to yaml.safe_load() if this
            # file is ever not fully trusted.
            Manifest._doc = yaml.load(f)
        Manifest._init = True

    @staticmethod
    def Get(k):
        """Return the raw manifest value for key *k* (KeyError if absent)."""
        Manifest._Init()
        return Manifest._doc[k]

    @staticmethod
    def GetDir(k):
        """Return the manifest value for *k* with '~' expanded to $HOME."""
        Manifest._Init()
        return Manifest._doc[k].replace("~", os.path.expanduser("~"))
def StgCost(dev):
    """Return the storage cost in $/GB/month for device class *dev*.

    Pricing snapshot (AWS):
        local-ssd1                 0.528
        ebs-gp2  (EBS SSD, gp2)    0.100
        ebs-st1  (EBS HDD, st1)    0.045
        ebs-sc1  (EBS HDD, sc1)    0.025

    Raises KeyError for an unknown device class.
    """
    dollars_per_gb_month = {}
    dollars_per_gb_month["local-ssd1"] = 0.528
    dollars_per_gb_month["ebs-gp2"] = 0.100
    dollars_per_gb_month["ebs-st1"] = 0.045
    dollars_per_gb_month["ebs-sc1"] = 0.025
    return dollars_per_gb_month[dev]
| [
"hobinyoon@gmail.com"
] | hobinyoon@gmail.com |
acee531285f3512277e5a1f525bae30cf58510c3 | 22e5656f437a32dc7f22799ed73a952b8799b40b | /utils/django/routers.py | 79714470c00a1e2f3dd39a1d6f08bd36cec90df1 | [] | no_license | smpio/python-utils | 0ffff742649368f4b0bd59c42ce80c950c132b95 | 7e159041b0203faef93f359346c553a56437d4cb | refs/heads/master | 2023-04-14T01:27:12.900232 | 2023-04-06T14:39:47 | 2023-04-06T14:39:47 | 127,027,145 | 1 | 4 | null | 2023-04-06T10:07:40 | 2018-03-27T18:08:16 | Python | UTF-8 | Python | false | false | 465 | py | from rest_framework import routers
class NoDetailTrailingSlashMixin:
    """Router mixin that strips the trailing slash from detail routes.

    Any generated route whose URL pattern contains the ``{lookup}``
    placeholder (i.e. a detail route) has its ``{trailing_slash}``
    placeholder removed; list routes are passed through unchanged.
    """

    def get_routes(self, viewset):
        generated = super().get_routes(viewset)
        return list(map(self._fix_route, generated))

    @staticmethod
    def _fix_route(route):
        is_detail = '{lookup}' in route.url
        if not is_detail:
            return route
        stripped_url = route.url.replace('{trailing_slash}', '')
        return route._replace(url=stripped_url)
class Router(NoDetailTrailingSlashMixin, routers.DefaultRouter):
    # DRF DefaultRouter whose detail routes have no trailing slash.
    pass
| [
"dbashkatov@gmail.com"
] | dbashkatov@gmail.com |
7350d72cc5eaeff79fde1cade70451d9c4967c96 | 61004e474b7b2ad0071c16766f0f7874f04f9466 | /examples/python-cicd-with-cloudbuilder/my_module/my_module.py | e89f6d7695a72e218ae1ba53c5cb94576b33a3fd | [
"Apache-2.0"
] | permissive | GoogleCloudPlatform/professional-services | eb79751efae765a8c691a745e520f44f51bd715c | 0f51121b945bd74c7f667e74e8861fceda87565c | refs/heads/main | 2023-09-05T02:57:33.328973 | 2023-08-30T14:40:30 | 2023-08-30T14:40:30 | 91,730,359 | 2,626 | 1,381 | Apache-2.0 | 2023-09-14T20:13:42 | 2017-05-18T19:29:27 | Python | UTF-8 | Python | false | false | 2,580 | py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample module used to demonstrate CI with:
- Pytest
- Cloud Source Repositories
- Cloud Builder
Tutorial found in README.md.
"""
import numpy as np
def is_numeric(x):
    """Return True if *x* is an int or float; bools are rejected.

    Uses isinstance so int/float subclasses are accepted, but bool is
    explicitly excluded (bool subclasses int) to keep the original strict
    behaviour of rejecting True/False.
    """
    return isinstance(x, (int, float)) and not isinstance(x, bool)


def add(a, b):
    """Adds two numbers, a and b.

    Args:
        a: A numeric variable.
        b: A numeric variable.

    Returns:
        A numeric variable that is the sum of a and b.

    Raises:
        TypeError: If a or b is not an int or float.
    """
    for var in (a, b):
        if not is_numeric(var):
            raise TypeError("Inputs a and b must be an int or float, "
                            "but {} was passed".format(var))
    return a + b


def square(x):
    """Returns the square of x.

    Args:
        x: A numeric variable.

    Returns:
        The square of x.

    Raises:
        TypeError: If x is not an int or float.
    """
    if not is_numeric(x):
        raise TypeError("Input x must be an int or float, "
                        "but {} was passed".format(x))
    return x ** 2


def log_transform(x, const=1):
    """Log Transforms x.

    Returns the natural log transform of x, to reduce the skewedness for
    some distribution X.  For more on why/when to use a log transformation,
    read here: http://onlinestatbook.com/2/transformations/log.html.

    Args:
        x: A numeric variable to transform.
        const: A constant to add to x to prevent taking the log of 0.

    Returns:
        log(x + const).

    Raises:
        ArithmeticError: If const <= 0.  (The docstring previously claimed
            ValueError; the code raises ArithmeticError and callers may
            depend on it, so the documentation was corrected instead.)
        TypeError: If x is not an int or float.
    """
    # Validate const first to preserve the original check order.
    if const <= 0:
        raise ArithmeticError("Constant const must be greater than 0, not {}"
                              .format(const))
    if not is_numeric(x):
        raise TypeError("Input x must be an int or float, "
                        "but {} was passed".format(x))
    return np.log(x + const)
def main():  # pragma: no cover
    """Driver loop for the example code under test"""
    # Demonstrates each helper with fixed sample values.
    a = 5
    b = 10
    total = add(a, b)
    print("The sum of {} and {} is {}.".format(a, b, total))
    print("The square of {} is {}.".format(3, square(3)))
    print("The ln of 10 is {}.".format(log_transform(10)))


if __name__ == "__main__":  # pragma: no cover
    main()
| [
"jferriero@google.com"
] | jferriero@google.com |
71856eef1c928822337eb452b459376fa2805d37 | b0a894acdec7cee431ffe21381c4ec633900ecba | /python/responses/discount_test.py | ddd68389addfb7e7d47f1b2d963e746b94dee55e | [] | no_license | polikashechkin/discount | bca6b0f5ac9686851d3ca1bf743584c8e2cbe13c | 2141435b6eace059064fd19312f4234da0e98c7c | refs/heads/main | 2023-02-08T16:15:49.798061 | 2021-01-04T23:13:43 | 2021-01-04T23:13:43 | 326,829,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | import flask, os
from domino.core import log, DOMINO_ROOT
from . import Response as BaseResponse
class Response(BaseResponse):
def __init__(self, application, request):
super().__init__(application, request)
def __call__(self):
discount_test_py = os.path.join(DOMINO_ROOT, 'products', 'discount', 'active', 'python', 'discount_test', 'discount_test.py')
#file = os.path.join(DOMINO_ROOT, 'jobs', job_id, file_name)
#log.debug(f'download_job_file : {file} : {os.path.getsize(file)} : {file_name}')
with open(discount_test_py, 'rb') as f:
response = flask.make_response(f.read())
response.headers['Content-Type'] = 'application/octet-stream'
response.headers['Content-Description'] = 'File Transfer'
response.headers['Content-Disposition'] = f'attachment; filename=discount_test.py'
response.headers['Content-Length'] = os.path.getsize(discount_test_py)
return response
| [
"polikash@gmail.com"
] | polikash@gmail.com |
3c4da521dd7d78f7569acebeff32954d90f6f598 | 27a066c48096e30e3cf4a795edf6e8387f63728b | /mysite/mzitu/views/chart.py | 5df8820510ddc9505ba2079c7e2b7c33c52c79cd | [] | no_license | 26huitailang/django-tutorial | 2712317c3f7514743e90fb4135e5fe3fed5def90 | 28a0b04ee3b9ca7e2d6e84e522047c63b0d19c8f | refs/heads/master | 2023-01-07T11:55:37.003245 | 2019-09-04T09:19:50 | 2019-09-04T09:19:50 | 113,199,279 | 1 | 0 | null | 2023-01-03T15:24:01 | 2017-12-05T15:27:52 | Python | UTF-8 | Python | false | false | 1,616 | py | # coding: utf-8
from rest_framework import status
from rest_framework.viewsets import GenericViewSet
from rest_framework.response import Response
from rest_framework.decorators import action
from mzitu.models.proxy_ip import ProxyIp
from mzitu.models.tag import Tag
from mzitu.serializers import MzituDownloadedSuiteSerializer, TagSerializer
from django_vises.runtimes.misc import sort_dict_list
class ChartViewSet(GenericViewSet):
    """Read-only chart endpoints feeding bar-chart widgets.

    Each action returns an ECharts-style payload: a title, optional axis
    labels, a data series, and the series maximum for axis scaling.
    """
    serializer_class = MzituDownloadedSuiteSerializer
    queryset = ProxyIp.objects

    @action(methods=['GET'], detail=False)
    def tags_bar(self, request):
        # Bar chart: downloaded-suite count per tag, ordered by count.
        tag_objs = Tag.objects.all()
        serializer_data = TagSerializer(tag_objs, many=True).data
        serializer_data_order_by_suites_count = sort_dict_list(serializer_data, 'suites_count')
        title = 'Tags统计'  # user-facing chart title -- do not translate
        data = [x['suites_count'] for x in serializer_data_order_by_suites_count]
        response = {
            'title': title,
            'axisLabel': [x['name'] for x in serializer_data_order_by_suites_count],
            'data': data,
            'maxCount': max(data) if data else 0  # guard max() on empty
        }
        return Response(response, status=status.HTTP_200_OK)

    @action(methods=['GET'], detail=False)
    def proxyips_bar(self, request):
        # Bar chart: proxy-IP counts grouped by score.
        # NOTE(review): assumes ProxyIp.group_by_score() yields
        # (score, count) pairs -- confirm against the model.
        query_data = ProxyIp.group_by_score()
        title = 'ProxyIp score统计'  # user-facing chart title
        count_list = [x[1] for x in query_data]
        response = {
            'title': title,
            'data': query_data,
            'maxCount': max(count_list) if count_list else 0
        }
        return Response(response, status=status.HTTP_200_OK)
| [
"26huitailang@gmail.com"
] | 26huitailang@gmail.com |
e334daf0b31e4360e6cdf2afd408c0e88fdc5bf2 | fb4f1eb0cef8b1b3f37c5c175a64954976e8becb | /main test psp.py | a8efbaf342d0724fcc1b9ce8ccffd5f5de50aa69 | [
"MIT"
] | permissive | rtstock/rtstock4 | ccfbc7dbbe9015e12d2c96e49eb1680e720afedd | 040b3409cfb022767dde467578f359210a689512 | refs/heads/master | 2021-08-24T10:50:01.890638 | 2017-12-09T09:18:43 | 2017-12-09T09:18:43 | 113,656,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | #!flask/bin/python
from flask import Flask, jsonify
import pullstackedprices as psp
app = Flask(__name__)

# Fetch one month of stacked prices once at import time; the endpoint then
# serves this static snapshot.
symbols = ['MSFT','AMZN','GOOG']
fromdate,todate = '2017-09-01','2017-09-30'
df_prices = psp.stockpricesstacked(symbols,fromdate,todate)
# NOTE(review): on Python 3, .values() is a dict_values view that jsonify
# cannot serialise directly -- presumably this ran on Python 2 (where it is
# a list); confirm before reuse.
tasks = df_prices.T.to_dict().values()


@app.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks():
    # Single read-only endpoint returning every price row as JSON.
    return jsonify({'tasks': tasks})


if __name__ == '__main__':
    app.run(debug=True)  # Flask development server only
| [
"justin.malinchak@gmail.com"
] | justin.malinchak@gmail.com |
b299fe35884d7f09b8f4486fbc769258fc4a72a4 | 871dddb5c8059d96b767a323b0f87d3fbb62e786 | /vint/ast/plugin/scope_plugin/__init__.py | dd9e9a6f46981cf74b1053802f7605f5df0c65e4 | [
"MIT"
] | permissive | msabramo/vint | 6ef12ed61d54d0d2b2a9d1da1ce90c0e2c734ab2 | f13569f2a62ff13ff8ad913e7d6fb2c57953af20 | refs/heads/master | 2023-08-24T01:20:14.699485 | 2014-12-31T18:28:59 | 2014-12-31T18:28:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from vint.ast.plugin.scope_plugin.reference_reachability_tester import (
ReferenceReachabilityTester as ScopePlugin,
REACHABILITY_FLAG,
REFERECED_FLAG,
)
| [
"yuki.kokubun@mixi.co.jp"
] | yuki.kokubun@mixi.co.jp |
5d68868ce1759fe705efb235f6ff71e8dd3ce84c | a10c6356db4ca2ecc5e5701601dc5dd18e295320 | /di5tian.py | 77c2cf0a1830e81c03bd578bc53ea4e95845b19b | [] | no_license | iefan/psy_python | 56f249729c7bb14b00868e9bda551c27e3189914 | 802b513703b166f459102fd831195c75e4d179c7 | refs/heads/master | 2020-05-09T12:41:06.535332 | 2019-10-14T13:46:07 | 2019-10-14T13:46:07 | 181,119,607 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | def jiajianqi():
x=input()
x=int(x)
if x>100:
x-=10
if x<100:
x+=10
print(x)
def panduanshuzi():
print("请输入一个数:")
n=input()
n=int(n)
if n%2==0:
print("它是偶数。")
if n%2==1:
print("它是奇数。")
def tiaosheng():
print("请输入1分钟跳绳次数:")
n=input()
n=int(n)
if n>=200:
print("跳绳达人!")
else:
print("继续努力!")
def jiajianqi1():
x=input()
x=int(x)
if x==10:
x+=1
else:
x-=1
print("x=",x)
def panduanshuzi2():
print("请输入一个整数:")
n=input()
n=int(n)
if n%2==1:
print("奇数。")
else:
print("偶数。")
if __name__ == "__main__":
panduanshuzi2()
# jiajianqi1()
# tiaosheng()
# panduanshuzi()
# jiajianqi()
| [
"mybsppp@163.com"
] | mybsppp@163.com |
eda46fbebf2ccc570887262cdd4be7f03a6b6fca | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/Lazymux/routersploit/tests/creds/generic/test_ssh_default.py | e1dd5e99bdd91851a9ac265bd57fc4357e3b5bab | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f4d5753ff51381be86be84ae0ce056b28916e63591642d1f56d7d50f3b0580e6
size 600
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
f7a80e7026bb2ff0a23da37c912d27b519a0ade1 | 8c1aa957a41954daac70b13f1be06df0c4046bb2 | /wagtailwebsitebuilder/multisite/migrations/0007_seosettings.py | 13df3d6c31f0eae5eaa4ef0acd8b85d72b45c46c | [] | no_license | hanztura/wagtailwebsitebuilder | 6c1a2358d53877e4f70d70e5c7c6b472fabec974 | f56d1b799f9eda53b5596ed882b60df154581cc5 | refs/heads/master | 2021-05-21T08:30:16.170885 | 2020-08-29T22:35:59 | 2020-08-29T22:35:59 | 252,619,323 | 1 | 0 | null | 2021-04-16T20:26:46 | 2020-04-03T03:01:27 | Python | UTF-8 | Python | false | false | 876 | py | # Generated by Django 2.2.12 on 2020-04-23 16:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Creates the SeoSettings model: a per-site record (OneToOne to
    # wagtailcore.Site) holding the Google Analytics property id.

    dependencies = [
        ('wagtailcore', '0045_assign_unlock_grouppagepermission'),
        ('multisite', '0006_sitebranding_css'),
    ]

    operations = [
        migrations.CreateModel(
            name='SeoSettings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): a real GA id is baked in as the default.
                ('google_analytics_id', models.CharField(blank=True, default='UA-121442343-2', max_length=50)),
                ('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"hanztura@github.com"
] | hanztura@github.com |
9c28185c4c4eef844bafa20c6f6a7c3e67bb12ef | cbfb679bd068a1153ed855f0db1a8b9e0d4bfd98 | /leet/google/trees_and_graphs/737_sentence_similarity_II.py | 4cb050c3ac8492ac926734896cc01923fc0ff951 | [] | no_license | arsamigullin/problem_solving_python | 47715858a394ba9298e04c11f2fe7f5ec0ee443a | 59f70dc4466e15df591ba285317e4a1fe808ed60 | refs/heads/master | 2023-03-04T01:13:51.280001 | 2023-02-27T18:20:56 | 2023-02-27T18:20:56 | 212,953,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,017 | py | from typing import List
# NlogP + P
class Solution:
    """Sentence similarity via union-find: two sentences are similar when
    they have equal length and each pair of corresponding words is either
    identical or belongs to the same similarity component."""

    def areSentencesSimilarTwo(self, words1: List[str], words2: List[str], pairs: List[List[str]]) -> bool:
        if len(words1) != len(words2):
            return False

        parent = {}
        comp_size = {}

        def root_of(word):
            # Find with path compression; unseen words start as their own root.
            parent.setdefault(word, word)
            comp_size.setdefault(word, 1)
            trail = []
            node = word
            while parent[node] != node:
                trail.append(node)
                node = parent[node]
            for seen in trail:
                parent[seen] = node
            return node

        def join(a, b):
            # Union by size; no-op when already in the same component.
            ra, rb = root_of(a), root_of(b)
            if ra == rb:
                return
            if comp_size[ra] < comp_size[rb]:
                ra, rb = rb, ra
            parent[rb] = ra
            comp_size[ra] += comp_size[rb]

        for a, b in pairs:
            join(a, b)

        return all(u == v or root_of(u) == root_of(v)
                   for u, v in zip(words1, words2))
if __name__ == '__main__':
s = Solution()
s.areSentencesSimilarTwo(["a","very","delicious","meal"],
["one","really","delicious","dinner"],
[["great","good"],["extraordinary","good"],["well","good"],["wonderful","good"],["excellent","good"],["fine","good"],["nice","good"],
["any","one"],["some","one"],["unique","one"],["the","one"],["an","one"],["single","one"],["a","one"],["truck","car"],
["wagon","car"],["automobile","car"],["auto","car"],["vehicle","car"],["entertain","have"],["drink","have"],
["eat","have"],["take","have"],["fruits","meal"],["brunch","meal"],["breakfast","meal"],["food","meal"],
["dinner","meal"],["super","meal"],["lunch","meal"],["possess","own"],["keep","own"],["have","own"],
["extremely","very"],["actually","very"],["really","very"],["super","very"]]) | [
"ar.smglln@gmail.com"
] | ar.smglln@gmail.com |
2adaf59542f49107aaf705211037eece8356908c | 06e1f3c6c1930e1072f4ba23b7b831b839f01863 | /apps/bot/commands/EasyCommands/Issues.py | 100feaaddffdfdbaf68ef8732631be4986edbcf5 | [
"MIT"
] | permissive | FuckBrains/petrovich | 8426a050cb9cf72d480f6113c8244dc20d9621dd | 274a0a32889726fb5db5995b865bf2a126b98779 | refs/heads/master | 2023-05-18T21:58:04.669670 | 2021-05-16T08:11:20 | 2021-05-16T08:11:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | from apps.bot.classes.common.CommonCommand import CommonCommand
class Issues(CommonCommand):
    """Bot command that replies with a link to the project's issue tracker."""
    name = "баги"  # primary trigger word ("bugs")
    names = ["ишюс", "ишьюс", "иши"]  # alternative spellings of "issues"
    help_text = "список проблем"  # help entry: "list of problems"
    def start(self):
        # The command's whole response is the GitHub issues URL.
        return "https://github.com/Xoma163/petrovich/issues"
| [
"Xoma163rus@gmail.com"
] | Xoma163rus@gmail.com |
87fac722461a7dfc71a9030b1519b2d5f96c72ed | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/NasFPN/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py | 927609206e1323dcf1173c4a5393e3f03d534c0a | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 370 | py | _base_ = './faster_rcnn_r50_fpn_2x_coco.py'
# Faster R-CNN variant: swap the base config's ResNet-50 backbone for a
# ResNeXt-101 32x4d; everything not overridden here comes from `_base_`.
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',  # ImageNet-pretrained weights
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,  # ResNeXt cardinality
        base_width=4,  # width per group
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # expose all four stages to the neck
        frozen_stages=1,  # freeze the stem and first stage during training
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
d20611c382a270e6bedfebabc4b96fcff2d7239b | 2545624bbbf982aa6243acf8b0cb9f7eaef155d6 | /2020/round_2/pancake_2021_p_again/gen.py | 33b3cdba217ccd93bf8b8effea2925398b1b753e | [] | no_license | dprgarner/codejam | 9f420003fb48c2155bd54942803781a095e984d1 | d7e1134fe3fe850b419aa675260c4ced630731d0 | refs/heads/master | 2021-07-12T05:36:08.465603 | 2021-07-03T12:37:46 | 2021-07-03T12:37:46 | 87,791,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | cases = []
# Enumerate every ordered pair (i, j) with both coordinates in 1..9,
# then emit the case count followed by one "i j" pair per line.
cases.extend((i, j) for i in range(1, 10) for j in range(1, 10))
print(len(cases))
for i, j in cases:
    print("{} {}".format(i, j))
| [
"dprgarner@gmail.com"
] | dprgarner@gmail.com |
8bdbc29333dfc50a656a1130f2b4d2525b0bfcd3 | bd6fd6bb82bf3179a4571c7a2ca3a030f5684c5c | /mundo1-Fundamentos/022 - Analisador de Textos.py | 424fbfc5d1608bd3da42e8531325cd47c9a2fb1b | [
"MIT"
] | permissive | jonasht/CursoEmVideo-CursoDePython3 | b3e70cea1df9f33f409c4c680761abe5e7b9e739 | a1bbf1fe4226b1828213742ee5a440278d903fd1 | refs/heads/master | 2023-08-27T12:12:38.103023 | 2021-10-29T19:05:01 | 2021-10-29T19:05:01 | 276,724,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | nome = str(input('Nome?: ')).strip()
# Text-analysis exercise: print upper/lower-case forms of the name read
# above, then letter counts (spaces excluded, and first name only).
print('tudo maiusculo {}'.format(nome.upper()))
print('tudo minusculo {}'.format(nome.lower()))
nses = nome.replace(' ', '')  # the name with all spaces removed
print('QTD de letras sem espaço {}'.format(len(nses)))
nd = nome.split()
qtdpn = len(nd[0])  # number of letters in the first name
print('QTD de letras do primeiro nome {}'.format(qtdpn)) | [
"jhenriquet@outlook.com.br"
] | jhenriquet@outlook.com.br |
053af721e4cec0a579f203ee57f45a2625da4492 | 862af34d5a1ebb3eb700e40c5877e394ee845b5f | /src/core/src/tortuga/kit/rhelOsKitOps.py | 30e23c97d265c1e3975abab78df519e6f6035d98 | [
"Apache-2.0"
] | permissive | ffxf/tortuga | 4e59617153de92cfc1a9b7bd95f8bae5ea8e1134 | a20ef7d0274be18bdaae6b9fbe879cd0473eaf1b | refs/heads/master | 2021-01-25T13:42:05.809188 | 2018-03-01T20:31:32 | 2018-03-01T20:31:32 | 123,608,729 | 0 | 0 | null | 2018-03-02T17:21:59 | 2018-03-02T17:21:59 | null | UTF-8 | Python | false | false | 3,791 | py | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from tortuga.boot.bootMediaTool import BootMediaTool
from tortuga.os_utility.osUtility \
import cpio_copytree, removeFile, make_symlink_farm
from tortuga.kit.osKitOps import OsKitOps
from tortuga.exceptions.copyError import CopyError
from tortuga.exceptions.fileAlreadyExists import FileAlreadyExists
from tortuga.exceptions.unrecognizedKitMedia import UnrecognizedKitMedia
from tortuga.kit.manager import KitManager
from tortuga.kit.utils import format_kit_descriptor
class KitOps(OsKitOps):
    """RHEL-family implementation of the OS-kit operations."""
    def prepareOSKit(self, srcPath):
        """Build the OS-kit descriptor from the media at *srcPath* and copy
        its kernel/initrd into the PXE boot directory.

        Returns the kit descriptor dict; raises UnrecognizedKitMedia when
        the media has no kernel/initrd (i.e. it is not disc 1).
        """
        # Mirror repositories only record the major version component.
        version = self.osdistro.version \
            if not self.mirror else self.osdistro.version.split('.', 1)[0]
        kit = {
            'ver': version,
            'arch': self.osdistro.arch,
            'name': self.osdistro.ostype,
        }
        kit_descr = format_kit_descriptor(kit['name'], kit['ver'], kit['arch'])
        kit['sum'] = 'OS kit for %s %s' % (kit['name'], kit['ver'])
        kit['initrd'] = 'initrd-%s.img' % (kit_descr)
        kit['kernel'] = 'kernel-%s' % (kit_descr)
        # Copy kernel & initrd to pxedir
        if not os.path.exists(self.pxeboot_dir):
            os.makedirs(self.pxeboot_dir)
        bmt = BootMediaTool(srcPath)
        # Check whether this is disc 1.
        if bmt.getKernelPath() is None or bmt.getInitrdPath() is None:
            raise UnrecognizedKitMedia("Please supply disc 1 first!")
        try:
            bmt.copyInitrd(os.path.join(self.pxeboot_dir, kit['initrd']), True)
            # copy kernel to standardized name
            bmt.copyKernel(os.path.join(self.pxeboot_dir, kit['kernel']), True)
        except (CopyError, FileAlreadyExists, IOError) as exc:
            # cleanup tmp stuff
            self.logger.error(
                'Error copying initrd and/or kernel from OS media'
                ' (exception=[%s])' % (exc))
            # consider the kernel/initrd invalidated, remove them
            removeFile(os.path.join(self.pxeboot_dir, kit['kernel']))
            removeFile(os.path.join(self.pxeboot_dir, kit['initrd']))
            raise
        self.kit = kit
        return kit
    def _getRepoDir(self):
        # Repository path derived from OS type / version / architecture.
        return self._cm.getYumKit(
            self._osdistro.ostype.lower(),
            self._osdistro.getVersion(),
            self._osdistro.getArch())
    def copyOsMedia(self, srcPath, **kwargs):
        """Copy (or symlink-farm) the OS media tree into the repository."""
        dstPath = self._getRepoDir()
        if 'descr' in kwargs and kwargs['descr']:
            print('Please wait... %s' % (kwargs['descr']))
        if self._bUseSymlinks:
            make_symlink_farm(srcPath, dstPath)
        else:
            cpio_copytree(srcPath, dstPath)
    def addProxy(self, url):
        """Register *url* as a proxy source for this OS kit (no RPM copy)."""
        self._logger.info('Proxy OS kit detected, no RPMs will be copied')
        # Determine the "real" repo dir and the directory as apache
        # sees it
        realRepoDir = self._getRepoDir()
        repoDir = realRepoDir[realRepoDir.index('/repos'):]
        # Set proxy information in the apache component configuration file
        self._logger.info(
            'Enabling proxy for OS kit in web server configuration file')
        # Configure proxy
        KitManager().configureProxy(url, repoDir)
| [
"mfrisch@univa.com"
] | mfrisch@univa.com |
81198d05d7b9de729d65c4cfd9ca7322d8ce2914 | 0ba1743e9f865a023f72a14d3a5c16b99ee7f138 | /problems/test_0287_binarysearch.py | 765cd2ac10c5543cba973dcd972909865da6bf77 | [
"Unlicense"
] | permissive | chrisxue815/leetcode_python | d0a38a4168243b0628256825581a6df1b673855c | a33eb7b833f6998972e5340d383443f3a2ee64e3 | refs/heads/main | 2022-06-20T15:09:27.221807 | 2022-06-02T21:55:35 | 2022-06-02T21:55:35 | 94,590,264 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py | import unittest
class Solution:
    """Locate the duplicated value by binary-searching the value range
    [1, len(nums) - 1] and counting occurrences on each probe."""

    def findDuplicate(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        low, high = 1, len(nums) - 1
        while low <= high:
            candidate = low + (high - low) // 2
            below = sum(1 for value in nums if value < candidate)
            exact = sum(1 for value in nums if value == candidate)
            if exact > 1:
                # The probe itself appears more than once: found it.
                return candidate
            if below >= candidate:
                # Too many values below the probe — duplicate is smaller.
                high = candidate - 1
            else:
                low = candidate + 1
        raise RuntimeError
class Test(unittest.TestCase):
    """Regression cases: each input list holds exactly one duplicated value."""
    def test(self):
        self._test([1, 2, 3, 4, 4, 5], 4)
        self._test([4, 5, 3, 4, 1, 2], 4)
        self._test([1, 2, 3, 4, 5, 5, 6], 5)
        self._test([1, 3, 4, 5, 6, 6, 6], 6)
        self._test([1, 3, 4, 5, 6, 6, 6, 7], 6)
        self._test([1, 3, 4, 2, 1], 1)
    def _test(self, nums, expected):
        # Helper: run the solver and compare against the known duplicate.
        actual = Solution().findDuplicate(nums)
        self.assertEqual(expected, actual)
unittest.main()
| [
"chrisxue815@gmail.com"
] | chrisxue815@gmail.com |
0216cd791452706346a47f22e45505688ecaa1b4 | c36b0d629ef5ad6242966de7142094c9164ada5b | /trackself/urls.py | 46d20f2b04748e6ee62ec06f472aed543f00ca32 | [] | no_license | fingerecho/trackingsite | 039f340d845759d50effb7f0270b587c2897085d | fad56ba8b31233536117339a70a25617b18fe853 | refs/heads/master | 2020-04-08T01:43:39.023302 | 2018-11-24T06:03:23 | 2018-11-24T06:03:23 | 158,907,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from django.urls import path
from . import views
from django.contrib import admin
# URL routes for the trackself app: Django admin plus the index view.
urlpatterns = [
    path(r'admin/',admin.site.urls),
    path(r'index',views.index,name='index'),
]
| [
"m13001282105@163.com"
] | m13001282105@163.com |
182370860c50efe71212fbfbb24a0f86937877ac | 943dca755b940493a8452223cfe5daa2fb4908eb | /abc263/b.py | dd46fc2c62eda6ba999865235f599fb3eb0de688 | [] | no_license | ymsk-sky/atcoder | 5e34556582763b7095a5f3a7bae18cbe5b2696b2 | 36d7841b70b521bee853cdd6d670f8e283d83e8d | refs/heads/master | 2023-08-20T01:34:16.323870 | 2023-08-13T04:49:12 | 2023-08-13T04:49:12 | 254,348,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | n = int(input())
# AtCoder ABC263 B: l[i-2] appears to hold the parent of person i (person 1
# is the root) — count the generations from person n up to person 1.
l = list(map(int, input().split()))
p = l[-1]  # parent of the last person
ans = 0
while 1:
    ans += 1  # one generation climbed
    if p == 1:
        break
    p = l[p - 2]  # move up to the parent's parent
print(ans)
| [
"ymsk.sky.95@gmail.com"
] | ymsk.sky.95@gmail.com |
d2613d463f09241944ccd28d5c5081addcf81b51 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2726/60770/291984.py | 9c810f3e01a258e60602e7d11959c4bfd510c69f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | def solve():
    # Read a level-order, array-encoded binary tree, e.g. "[1,2,3,null]".
    tree=input()[1:-1].split(',')
    res=float('inf')
    def lsonOf(fa):
        # Left-child index in the 1-based heap layout, or -1 when absent.
        fa += 1
        fa *= 2
        fa -= 1
        if fa>=len(tree):
            return -1
        return fa
    def rsonOf(fa):
        # Right-child index (left sibling + 1), or -1 when the left is absent.
        res=lsonOf(fa) + 1
        if res==0:
            return -1
        return res
    def isLeaf(p):
        # A node is a leaf when both children are missing or "null".
        l=lsonOf(p)
        r=rsonOf(p)
        if (l==-1 or tree[l]=='null') and (r==-1 or tree[r]=='null'):
            return True
        return False
    for p in range(len(tree)):
        if isLeaf(p):
            # NOTE(review): depth of index p should be floor(log2(p+1))+1;
            # (p+1)**0.5 looks like a square root by mistake — verify.
            res=int((p+1)**0.5)+1
            print(res)
            return
if __name__ == '__main__':
    solve()
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
cad26ad0f638787079323346b860726624b0212d | 425db5a849281d333e68c26a26678e7c8ce11b66 | /LeetCodeSolutions/LeetCode_0692.py | 858ba66f976157debb0ff7f5e396462d417c3430 | [
"MIT"
] | permissive | lih627/python-algorithm-templates | e8092b327a02506086414df41bbfb2af5d6b06dc | a61fd583e33a769b44ab758990625d3381793768 | refs/heads/master | 2021-07-23T17:10:43.814639 | 2021-01-21T17:14:55 | 2021-01-21T17:14:55 | 238,456,498 | 29 | 8 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from typing import List
import collections
class Solution:
    """Top-k frequent words using a heap keyed on (-count, word), so ties
    are broken by ascending lexicographic order."""

    def topKFrequent(self, words: List[str], k: int) -> List[str]:
        import heapq
        frequencies = collections.Counter(words)
        ordered = [(-count, word) for word, count in frequencies.items()]
        heapq.heapify(ordered)
        return [heapq.heappop(ordered)[1] for _ in range(k)]
| [
"lih627@outlook.com"
] | lih627@outlook.com |
17d271c6a427fff9b4dfe9932c56fde042095f96 | 0bd5f9a235f1399f7daa3539ba6d1f6be7a80912 | /Documents/aoomuki competences version 3/app_competences/migrations/0001_initial.py | bcf18ba6409fd354af19ac6fcab220108ed946c9 | [] | no_license | Kamelgasmi/matrice-aoomuki-version-3 | cb71943526dcc11523ed7df0bfab5d7efb686af2 | ca6b6f582aba6e20c5726eb7a87f4555172d4020 | refs/heads/master | 2023-04-07T11:31:57.959292 | 2021-04-13T12:56:21 | 2021-04-13T12:56:21 | 357,554,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,208 | py | # Generated by Django 3.1.7 on 2021-04-12 13:39
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema: workstation list, society, and a custom email-login
    # User model (note: username is optional, email is the unique key).
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='ListWorkStation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True, verbose_name='nom')),
                ('commentary', models.CharField(max_length=250, verbose_name='commentaire')),
            ],
            options={
                'verbose_name': 'Liste postes de travail',
            },
        ),
        migrations.CreateModel(
            name='Society',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True, verbose_name='Société')),
            ],
            options={
                'verbose_name': 'Société',
            },
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
                ('username', models.CharField(blank=True, max_length=50, null=True)),
                ('last_name', models.CharField(max_length=50)),
                ('first_name', models.CharField(max_length=50)),
                ('is_active', models.BooleanField(default=True)),
                ('is_superuser', models.BooleanField(default=False)),
                ('is_collaborater', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('society', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app_competences.society')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
                ('workstation', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app_competences.listworkstation')),
            ],
            options={
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| [
"kam_7@hotmail.fr"
] | kam_7@hotmail.fr |
56e2015c19dc92772cd6d0548b4d740fd39e9f45 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_058/ch78_2019_11_26_23_48_23_922955.py | 6c96774e2a01d0587cf432fa11cf584cfb020b05 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | import math
# Read runner names and accelerations (m/s^2) until the user types 'sair'.
corredores = {}
x = input('Digite um nome : ')
while x != 'sair':
    y = float(input('Qual sua aceleração? '))
    corredores[x] = y
    x = input('Digite um nome : ')


def calcula_tempo(dicionario):
    """Compute each runner's 100 m time (start from rest, constant
    acceleration), print the winner and return {winner: time}.

    Fixes vs. the original: the undefined name ``maior`` (the minimum is
    ``menor``), the missing closing parenthesis on the final ``print``
    (a SyntaxError), and the function now actually returns its result.
    """
    tempo = {}
    for nome, acel in dicionario.items():
        # v_f = sqrt(v0**2 + 2*a*d) with v0 = 0 and d = 100 m
        vf = math.sqrt(2 * acel * 100)
        tempo[nome] = vf / acel  # t = (v_f - v0) / a
    vencedor = {}
    menor = float('inf')  # safer than the original magic bound 10000
    ven = None
    for nome, t in tempo.items():
        if t < menor:
            menor = t
            ven = nome
    vencedor[ven] = menor
    print('O vencedor é {} com tempo de conclusão de {} s'.format(ven, menor))
    return vencedor


vencedor = calcula_tempo(corredores)
| [
"you@example.com"
] | you@example.com |
3fd13b7132ecb66410981b9c0ade681ac6e53469 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/35/usersdata/117/13608/submittedfiles/dec2bin.py | 30109acd2c7b1f8f7145532764154498a57aa105 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | # -*- coding: utf-8 -*-
from __future__ import division
p=input('digite p:')
q=input('digite q:')
cont=0
a=p
# Count the number of decimal digits of p.
# Fix: the original read "wile p>0:", a SyntaxError; the correct keyword
# "while" matches the loop written further below ("while q>0:").
while p>0:
    p=p//10
    cont=cont+1
p=a
sub=0
while q>0:
ulti=q%(10**cont)
if ulti==p:
sub=bub+1
break
else:
q=q//10 | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e9f9cc1a0ee03e4ac6a36a4a894c784c4a493690 | ae844174eff5d14b8627ef8b32e66713f03772c8 | /Labs/Lab7/lab07files/lab7_check2.py | 80d5573f19e05584b1b05570c4c93f1f2e945988 | [] | no_license | tayloa/CSCI1100_Fall2015 | 1bd6250894083086437c7defceddacf73315b83b | 4ca1e6261e3c5d5372d3a097cb6c8601a2a8c1c6 | refs/heads/master | 2021-01-22T22:43:55.301293 | 2017-05-30T04:52:21 | 2017-05-30T04:52:21 | 92,784,700 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | def parse_line(line):
    # A valid record needs at least three '/' separators: p/l/c/rest.
    if line.count("/")<3:
        return None
    else:
        line = line.split("/",3) #split can have a max
        # The first three fields must be non-negative integers.
        if (line[0].isdigit() == False) or (line[1].isdigit()==False) or (line[2].isdigit()==False):
            return None
        else:
            # Return (int, int, int, remainder-of-line).
            line = (int(line[0]),int(line[1]),int(line[2]),line[3])
            return line
def get_line(fname, parno, lineno):
    """Return line *lineno* (0-based) of paragraph *parno* (1-based) of the
    file *fname*, or None when no such line exists.

    Paragraphs are runs of non-blank lines separated by one or more blank
    lines; the returned line keeps its trailing newline.

    Fix vs. the original: the file handle is now closed (``with``) and the
    not-found case returns None explicitly.
    """
    paragraph = 1       # paragraph currently being read (file starts in #1)
    line_index = -1     # index of the last line seen in the target paragraph
    in_blank_run = False
    with open(fname) as handle:
        for raw in handle:
            if raw.strip() == '':
                # Only the first blank line of a run ends a paragraph.
                if not in_blank_run:
                    paragraph += 1
                    in_blank_run = True
                continue
            in_blank_run = False
            if paragraph == parno:
                line_index += 1
                if line_index == lineno:
                    return raw
    return None
# Interactive driver (Python 2 — note raw_input and, below, the print
# statement): ask for a file name (".txt" appended), paragraph and line.
filename = raw_input("Please enter file name => ")+".txt"
parnum = int(raw_input("Please enter paragraph number => "))
linenum = int(raw_input("Please enter line number => "))
print get_line(filename,parnum,linenum) | [
"halfnote1004@gmail.com"
] | halfnote1004@gmail.com |
0426299a5e2bc8d83f983d588f345dc253ccb2a7 | 0e77ebcc2b72b8e1b620d916336166badd38a887 | /Python项目开发实战/第4章 创建桌面应用/4.12 存储本地数据/test_locale.py | d58d1c235c8a2c703f4ad201e6c2331c2ed2fcbc | [] | no_license | L1nwatch/Mac-Python-3.X | ce70f2c3691a73db9256bcf47510c415d68591f0 | 73022b40d26ad09051329ae7ff8aae7201d8de6d | refs/heads/master | 2022-08-08T20:59:58.041245 | 2022-08-07T14:41:39 | 2022-08-07T14:41:39 | 50,715,245 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | #!/bin/env python3
# -*- coding: utf-8 -*-
# version: Python3.X
''' Exercise the ``locale`` module under Mac OS X 10.10 / Python 3.4.4.
'''
__author__ = '__L1n__w@tch'
import locale as loc
import time
def main():
    """Demonstrate locale-aware formatting, collation and conversions."""
    # Repeating these calls in different sessions shows some differences:
    # the system clearly applies the configured region — currency gets the
    # proper symbol and date/time differ noticeably from the UK versions.
    print(loc.setlocale(loc.LC_ALL, ""))
    # print(loc.currency(350))  # this call raised an error
    print(time.strftime("%x %X", time.localtime()))
    # Shows the "n" format specifier working for both floats and integers.
    print("{:n}".format(3.14159))
    print("{:n}".format(42))
    # locale.strcoll() comparisons are useful because they apply the
    # locale's idea of character ordering: returns 1 when the first string
    # collates "higher", -1 when lower, 0 when the arguments are equal.
    print(loc.strcoll("Spanish", "Inquisition"))
    print(loc.strcoll("Inquisition", "Spanish"))
    print(loc.strcoll("Spanish", "Spanish"))
    # locale also provides conversion helpers that are handy in specific
    # situations: atoi(), atof(), str(), format(), format_string()
if __name__ == "__main__":
    main()
| [
"490772448@qq.com"
] | 490772448@qq.com |
c9bd445000a1cf3113f03ab73d9ab5e2fae14d37 | f0eadce9fa0a2cc0dc4cbe2f534df8952bb97c66 | /torchvision/prototype/models/quantization/inception.py | e9f48d097f69d12a2e0bf9c505328bde80a5ce40 | [
"BSD-3-Clause"
] | permissive | Hsuxu/vision | e78ea6bfbc8aa50c56573b467939e86df0138d07 | 8d3fb3455d5e1acb0fed412ece913b73774fbca4 | refs/heads/master | 2022-12-02T05:35:54.121664 | 2021-12-20T12:06:53 | 2021-12-20T12:06:53 | 215,186,338 | 1 | 0 | BSD-3-Clause | 2019-10-15T02:18:27 | 2019-10-15T02:18:27 | null | UTF-8 | Python | false | false | 2,937 | py | from functools import partial
from typing import Any, Optional, Union
from torchvision.prototype.transforms import ImageNetEval
from torchvision.transforms.functional import InterpolationMode
from ....models.quantization.inception import (
QuantizableInception3,
_replace_relu,
quantize_model,
)
from .._api import WeightsEnum, Weights
from .._meta import _IMAGENET_CATEGORIES
from .._utils import handle_legacy_interface, _ovewrite_named_param
from ..inception import Inception_V3_Weights
# Public names re-exported by this module.
__all__ = [
    "QuantizableInception3",
    "Inception_V3_QuantizedWeights",
    "inception_v3",
]
class Inception_V3_QuantizedWeights(WeightsEnum):
    # Post-training-quantized (fbgemm backend) InceptionV3 ImageNet weights.
    ImageNet1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-71447a44.pth",
        transforms=partial(ImageNetEval, crop_size=299, resize_size=342),
        meta={
            "size": (299, 299),
            "categories": _IMAGENET_CATEGORIES,
            "interpolation": InterpolationMode.BILINEAR,
            "backend": "fbgemm",
            "quantization": "ptq",  # post-training quantization
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
            # Float-precision counterpart these weights were quantized from.
            "unquantized": Inception_V3_Weights.ImageNet1K_V1,
            "acc@1": 77.176,
            "acc@5": 93.354,
        },
    )
    default = ImageNet1K_FBGEMM_V1
# Legacy shim: maps the old `pretrained=True` flag onto the right weights
# enum depending on whether quantization was requested.
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: Inception_V3_QuantizedWeights.ImageNet1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else Inception_V3_Weights.ImageNet1K_V1,
    )
)
def inception_v3(
    *,
    weights: Optional[Union[Inception_V3_QuantizedWeights, Inception_V3_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableInception3:
    """Build a quantizable InceptionV3, optionally quantized and/or loaded
    with pretrained *weights* (downloaded with a progress bar)."""
    weights = (Inception_V3_QuantizedWeights if quantize else Inception_V3_Weights).verify(weights)

    original_aux_logits = kwargs.get("aux_logits", False)
    if weights is not None:
        # Pretrained checkpoints pin several constructor arguments.
        if "transform_input" not in kwargs:
            _ovewrite_named_param(kwargs, "transform_input", True)
        _ovewrite_named_param(kwargs, "aux_logits", True)
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    model = QuantizableInception3(**kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)

    if weights is not None:
        # The aux head is stripped before loading in the quantized case and
        # after loading otherwise (the checkpoint holds aux weights too).
        if quantize and not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None
        model.load_state_dict(weights.get_state_dict(progress=progress))
        if not quantize and not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None

    return model
| [
"noreply@github.com"
] | Hsuxu.noreply@github.com |
f90d35587b9d175438dbef49cecb78e821bd4444 | a86ca34e23afaf67fdf858df9e47847606b23e0c | /lib/temboo/Library/Stripe/RetrieveInvoiceItem.py | 172fd56db2ed79b938b7e3caa824b0c46b744c8b | [] | no_license | miriammelnick/dont-get-mugged | 6026ad93c910baaecbc3f5477629b0322e116fa8 | 1613ee636c027ccc49c3f84a5f186e27de7f0f9d | refs/heads/master | 2021-01-13T02:18:39.599323 | 2012-08-12T23:25:47 | 2012-08-12T23:25:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,547 | py |
###############################################################################
#
# RetrieveInvoiceItem
# Retrieves invoice items with a specified id.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class RetrieveInvoiceItem(Choreography):
    """
    Create a new instance of the RetrieveInvoiceItem Choreography. A TembooSession object, containing a valid
    set of Temboo credentials, must be supplied.
    """
    def __init__(self, temboo_session):
        Choreography.__init__(self, temboo_session, '/Library/Stripe/RetrieveInvoiceItem')
    def new_input_set(self):
        return RetrieveInvoiceItemInputSet()
    def _make_result_set(self, result, path):
        return RetrieveInvoiceItemResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        return RetrieveInvoiceItemChoreographyExecution(session, exec_id, path)
# NOTE(review): generated code — the triple-quoted strings below sit at
# module level / between methods, so they are not real docstrings.
"""
An InputSet with methods appropriate for specifying the inputs to the RetrieveInvoiceItem
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class RetrieveInvoiceItemInputSet(InputSet):
    """
    Set the value of the APISecretKey input for this choreography. ((string) The secret API Key provided by Stripe)
    """
    def set_APISecretKey(self, value):
        InputSet._set_input(self, 'APISecretKey', value)
    """
    Set the value of the InvoiceItemId input for this choreography. ((string) The unique identifier of the invoice item you want to retrieve)
    """
    def set_InvoiceItemId(self, value):
        InputSet._set_input(self, 'InvoiceItemId', value)
"""
A ResultSet with methods tailored to the values returned by the RetrieveInvoiceItem choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class RetrieveInvoiceItemResultSet(ResultSet):
    """
    Retrieve the value for the "Response" output from this choreography execution. ((XML) The response from Stripe)
    """
    def get_Response(self):
        return self._output.get('Response', None)
class RetrieveInvoiceItemChoreographyExecution(ChoreographyExecution):
    # Execution wrapper: builds result sets for an async choreo run.
    def _make_result_set(self, response, path):
        return RetrieveInvoiceItemResultSet(response, path)
| [
"miriam@famulus"
] | miriam@famulus |
785a58526be736640a04656184c4044f41773f12 | bae5f696b76af428fb5555c147c4f1bcff1bb62e | /metalearn/metalearn/components/__init__.py | b92fcf251dc0446ca613d9c05a62233e48318bb2 | [
"MIT"
] | permissive | cosmicBboy/ml-research | 1e309f881f9810e7a82a262d625db5d684752705 | 04fd31f68e7a44152caf6eaaf66ab59f136dd8f5 | refs/heads/master | 2021-01-24T09:58:25.662826 | 2020-08-10T22:08:23 | 2020-08-10T22:08:23 | 123,030,133 | 8 | 4 | MIT | 2019-06-29T20:13:37 | 2018-02-26T21:03:02 | Jupyter Notebook | UTF-8 | Python | false | false | 331 | py | from .. import ignore_warnings
from . import (
algorithm_component, classifiers, constants, data_preprocessors,
feature_preprocessors, hyperparameter, regressors)
# Public API of the components subpackage.
# Fix: __all__ must contain *names* (strings).  The original listed the
# imported module objects themselves, which makes
# `from metalearn.components import *` fail with a TypeError.
__all__ = [
    "algorithm_component",
    "classifiers",
    "constants",
    "data_preprocessors",
    "feature_preprocessors",
    "hyperparameter",
    "regressors",
]
| [
"niels.bantilan@gmail.com"
] | niels.bantilan@gmail.com |
75cf07e523193c41c942da387d214a49e9496885 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /uS8tMvEyvTXD88wps_18.py | 37c9907328e853b8866b8e881a203640fea172f4 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | """
Write a function that takes a string of one or more words as an argument and
returns the same string, but with all five or more letter words reversed.
Strings passed in will consist of only letters and spaces. Spaces will be
included only when more than one word is present.
### Examples
reverse("Reverse") ➞ "esreveR"
reverse("This is a typical sentence.") ➞ "This is a lacipyt .ecnetnes"
reverse("The dog is big.") ➞ "The dog is big."
### Notes
You can expect a valid string to be provided for each test case.
"""
def reverse(txt):
    """Return *txt* with every word of five or more characters reversed.

    Words are the space-separated chunks of the input; shorter words and
    the single-space separators are preserved unchanged.
    """
    flipped = [w[::-1] if len(w) >= 5 else w for w in txt.split(" ")]
    return ' '.join(flipped)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
6f3b3d341b798c8f51760663f81a43fb115bcb36 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2614/60654/288271.py | 7e91ab794edd7b4d263d9d979428fe5efdf5fda9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | a = int(input())
for i in range(a):
a1 = int(input())
b = list(map(int,input().split()))
c = list(map(int, input().split()))
d = list(map(int, input().split()))
for j in range(a1):
if b[j]-c[j] in d:
print(d.index(b[j]-c[j])) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
095e9c57f0e4181ee8a24b0683e0a1e5f1714b67 | 20a53d9a52f839ddec0cacff6ac12b63626c9548 | /phonopy/cui/collect_cell_info.py | 54e27b00ff106c1b0effd776a0e271eba6706872 | [] | permissive | ntq1982/phonopy | 339c6756c38cd7301167fc26fa117afdf1343b90 | fc73c9ba8815180bff8428174495c157d9444c68 | refs/heads/master | 2021-09-25T01:00:58.826347 | 2021-09-15T07:50:17 | 2021-09-15T07:50:17 | 238,440,992 | 0 | 0 | BSD-3-Clause | 2020-02-05T12:06:21 | 2020-02-05T12:06:20 | null | UTF-8 | Python | false | false | 5,759 | py | # Copyright (C) 2018 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from phonopy.interface.calculator import (
read_crystal_structure, get_default_cell_filename)
from phonopy.interface.vasp import read_vasp
def collect_cell_info(supercell_matrix=None,
                      primitive_matrix=None,
                      interface_mode=None,
                      cell_filename=None,
                      chemical_symbols=None,
                      enforce_primitive_matrix_auto=False,
                      command_name="phonopy",
                      symprec=1e-5):
    """Collect crystal structure and matrix settings for a phonopy run.

    Reads the unit cell via ``read_crystal_structure`` and reconciles the
    supercell / primitive matrices given as arguments with any values found
    in a phonopy.yaml-type file.

    Parameters
    ----------
    supercell_matrix : array_like or None
        Supercell matrix. When None, the input is assumed to be a
        phonopy.yaml-type file (which carries its own supercell matrix).
    primitive_matrix : array_like, str, or None
        Primitive matrix, or 'auto'. Overrides any value found in a
        phonopy.yaml-type file.
    interface_mode : str or None
        Calculator interface name (e.g. 'vasp'). When None and a supercell
        matrix is given, VASP-style parsing is attempted first.
    cell_filename : str or None
        Input structure filename. When None, the interface's default
        filename is used.
    chemical_symbols : list of str or None
        Chemical symbols used to override those in the structure file.
    enforce_primitive_matrix_auto : bool
        When True, the primitive matrix is forced to 'auto'.
    command_name : str
        'phonopy' or 'phono3py'; passed through to the reader.
    symprec : float
        Accepted for interface compatibility; not referenced in this
        function body.

    Returns
    -------
    tuple or str
        On success, a 7-tuple of
        (unitcell, supercell_matrix, primitive_matrix, unitcell_filename,
        optional_structure_info, interface_mode, has_read_phonopy_yaml).
        On failure, an error-message string for the caller to display.

    """
    # Decide which parser to use. With no supercell matrix, the input must
    # be a phonopy.yaml-type file. With a matrix but no explicit interface,
    # probe for a VASP POSCAR first and fall back to phonopy_yaml.
    if supercell_matrix is None:
        _interface_mode = "phonopy_yaml"
    elif interface_mode is None:
        try:
            if cell_filename is None:
                read_vasp(get_default_cell_filename('vasp'))
            else:
                read_vasp(cell_filename)
            # Parsed as VASP: leave mode as None so read_crystal_structure
            # applies its own default (VASP) handling.
            _interface_mode = None
        except (ValueError, TypeError):
            # TypeError occurs when cell_filename is None.
            # ValueError occurs in parsing POSCAR like file.
            _interface_mode = "phonopy_yaml"
    else:
        _interface_mode = interface_mode
    unitcell, optional_structure_info = read_crystal_structure(
        filename=cell_filename,
        interface_mode=_interface_mode,
        chemical_symbols=chemical_symbols,
        command_name=command_name)
    # optional_structure_info[0] is always the filename actually read.
    unitcell_filename = optional_structure_info[0]
    if _interface_mode == 'phonopy_yaml' and unitcell is not None:
        # A phonopy.yaml-type file was read successfully. Positions 1-3 of
        # optional_structure_info hold (calculator name, supercell matrix,
        # primitive matrix) recorded in that file; explicit arguments win
        # over the file's values where given.
        if optional_structure_info[1] is None:
            interface_mode_out = interface_mode
        else:
            interface_mode_out = optional_structure_info[1]
        if optional_structure_info[2] is None:
            _supercell_matrix = supercell_matrix
        else:
            _supercell_matrix = optional_structure_info[2]
        if primitive_matrix is not None:
            _primitive_matrix = primitive_matrix
        elif optional_structure_info[3] is not None:
            _primitive_matrix = optional_structure_info[3]
        else:
            _primitive_matrix = 'auto'
        has_read_phonopy_yaml = True
    else:
        # Ordinary calculator file: pass the arguments through unchanged.
        interface_mode_out = _interface_mode
        _supercell_matrix = supercell_matrix
        _primitive_matrix = primitive_matrix
        has_read_phonopy_yaml = False
    if enforce_primitive_matrix_auto:
        _primitive_matrix = 'auto'
    # With an automatic primitive search, an unspecified supercell matrix
    # defaults to the identity.
    if _supercell_matrix is None and _primitive_matrix == 'auto':
        supercell_matrix_out = np.eye(3, dtype='intc')
    else:
        supercell_matrix_out = _supercell_matrix
    primitive_matrix_out = _primitive_matrix
    if unitcell is None:
        # No structure could be read: build an explanatory message. The
        # shape of optional_structure_info depends on the interface; here
        # it is a tuple of candidate filenames.
        fname_list = optional_structure_info
        if len(fname_list) == 1:
            msg = "Crystal structure file of \"%s\"" % fname_list[0]
            msg_list = ["%s was not found." % msg, ]
        elif len(fname_list) == 2:
            msg = "Crystal structure file of \"%s\" %s" % fname_list
            msg_list = ["%s was not found." % msg, ]
        elif len(fname_list) == 4:
            msg_list = []
            if supercell_matrix is None:
                if cell_filename is None:
                    msg = ["Supercell matrix (DIM or --dim) is not specified. "
                           "To run phonopy without",
                           "explicitly setting supercell matrix, \"%s\" or \"%s\" "
                           % fname_list[:2],
                           "must exist in the current directory."]
                    msg_list += msg
                else:
                    msg_list.append("Supercell matrix (DIM or --dim) may be "
                                    "forgotten to be specified.")
            elif cell_filename is None:
                msg_list = ["Any crystal structure file was not found.", ""]
        return "\n".join(msg_list)
    if supercell_matrix_out is None:
        return "Supercell matrix (DIM or --dim) is not specified."
    # Check unit cell
    if np.linalg.det(unitcell.get_cell()) < 0.0:
        return "Lattice vectors have to follow the right-hand rule."
    return (unitcell, supercell_matrix_out, primitive_matrix_out,
            unitcell_filename, optional_structure_info, interface_mode_out,
            has_read_phonopy_yaml)
| [
"atz.togo@gmail.com"
] | atz.togo@gmail.com |
8b141305bed2823096e4588e17ad9b7156da71df | 4a5ddb5fe1e1d532c9b6a4bb8b6f708bb548b60a | /scripts/elasticsearch-administer.py | 8bcad199ffaa57a3435785b5bea1c8fb589fb65b | [] | no_license | saketkanth/commcare-hq | 526ce305717e12f7b72d6484bfd15c2a6038aaff | 6d3eb1a0e70cc2a59a82ec5bba12170387803150 | refs/heads/master | 2021-01-18T02:08:41.430694 | 2016-06-14T15:19:10 | 2016-06-14T15:19:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,451 | py | #!/usr/bin/env python
"""
Utilities for administering elasticsearch
These can be run locally when connected to the VPN
"""
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from collections import namedtuple
import json
import sys
from elasticsearch import Elasticsearch
from elasticsearch.client import ClusterClient, NodesClient, CatClient, IndicesClient
def pprint(data):
    """Pretty-print *data* to stdout as 4-space-indented JSON."""
    print(json.dumps(data, indent=4))
def confirm(msg):
    """Show *msg* with a y/n prompt; exit the process on anything but 'y'."""
    answer = raw_input(msg + "\n(y/n)")
    if answer != 'y':
        sys.exit()
Node = namedtuple("Node", "name node_id docs settings")
def get_nodes_info(es):
    """Return a ``Node`` tuple for every node in the cluster.

    Combines the per-node document stats with the per-node settings
    reported by the nodes API.
    """
    nodes_client = NodesClient(es)
    doc_stats = nodes_client.stats(metric="indices", index_metric="docs")
    node_info = nodes_client.info()
    nodes = []
    for node_id, node_stats in doc_stats['nodes'].items():
        nodes.append(Node(
            name=node_stats['name'],
            node_id=node_id,
            docs=node_stats['indices']['docs'],
            settings=node_info['nodes'][node_id]['settings'],
        ))
    return nodes
def cluster_status(es):
    """Print a cluster overview: health, pending tasks, nodes, allocation."""
    cluster_client = ClusterClient(es)
    print("\nCLUSTER HEALTH")
    pprint(cluster_client.health())
    print("\nPENDING TASKS")
    pprint(cluster_client.pending_tasks())
    print("\nNODES")
    for node in get_nodes_info(es):
        print("%s %s" % (node.name, node.docs))
    print("\nSHARD ALLOCATION")
    print(CatClient(es).allocation(v=True))
def shard_status(es):
    """Print the per-shard table from the cat API."""
    print(CatClient(es).shards(v=True))
def cluster_settings(es):
    """Dump the cluster-level settings as indented JSON."""
    pprint(ClusterClient(es).get_settings())
def index_settings(es):
    """Dump all index settings (with flattened keys) as indented JSON."""
    pprint(IndicesClient(es).get_settings(flat_settings=True))
def create_replica_shards(es):
    """Set one replica per shard on every index.

    See
    https://www.elastic.co/guide/en/elasticsearch/reference/2.3/indices-update-settings.html
    """
    replica_setting = {"index.number_of_replicas": 1}
    pprint(IndicesClient(es).put_settings(replica_setting, "_all"))
def cancel_replica_shards(es):
    """Set zero replicas on every index (drops all replica shards)."""
    replica_setting = {"index.number_of_replicas": 0}
    pprint(IndicesClient(es).put_settings(replica_setting, "_all"))
def decommission_node(es):
    """Interactively pick a node by name and drain all shards off it.

    Uses the cluster allocation-exclusion setting, so shards migrate away
    from the named node; the node itself keeps running.
    """
    cluster_client = ClusterClient(es)
    print("The nodes are:")
    nodes = get_nodes_info(es)
    for node in nodes:
        print("%s %s" % (node.name, node.docs))
    confirm("Are you sure you want to decommission a node?")
    node_name = raw_input("Which one would you like to decommission?\nname:")
    valid_names = [node.name for node in nodes]
    if node_name not in valid_names:
        print("You must enter one of {}".format(", ".join(valid_names)))
        return
    confirm("This will remove all shards from {}, okay?".format(node_name))
    exclusion_cmd = {"transient": {"cluster.routing.allocation.exclude._name": node_name}}
    pprint(cluster_client.put_settings(exclusion_cmd))
    print("The node is now being decommissioned.")
def force_zone_replicas(es):
    """Interactively enable zone-aware shard allocation for the given zones."""
    cluster_client = ClusterClient(es)
    print("NODE SETTINGS:")
    for node in get_nodes_info(es):
        pprint(node.settings)
    zones = raw_input("\nEnter the zone names, separated by a comma\n")
    confirm("Are you sure these zones exist?")
    awareness_cmd = {
        "persistent": {
            "cluster.routing.allocation.awareness.force.zone.values": zones,
            "cluster.routing.allocation.awareness.attributes": "zone",
        }
    }
    print("This will add the following settings")
    pprint(awareness_cmd)
    confirm("Okay?")
    pprint(cluster_client.put_settings(awareness_cmd))
    print("Finished")
def clear_zone_replicas(es):
    """Blank out the allocation-awareness settings.

    Elasticsearch has no proper way to unset settings, so empty strings are
    written instead: https://github.com/elastic/elasticsearch/issues/6732
    """
    cluster_client = ClusterClient(es)
    reset_cmd = {
        "persistent": {
            "cluster.routing.allocation.awareness.force.zone.values": "",
            "cluster.routing.allocation.awareness.attributes": "",
        }
    }
    confirm("Remove the allocation awareness settings from the cluster?")
    pprint(cluster_client.put_settings(reset_cmd))
    print("Cleared")
# CLI command name -> handler; each handler takes an Elasticsearch client.
commands = {
    'cluster_status': cluster_status,
    'cluster_settings': cluster_settings,
    'index_settings': index_settings,
    'decommission_node': decommission_node,
    'shard_status': shard_status,
    'force_zone_replicas': force_zone_replicas,
    'clear_zone_replicas': clear_zone_replicas,
    # These two were defined above but never registered, which made them
    # unreachable from the command line; expose them here.
    'create_replica_shards': create_replica_shards,
    'cancel_replica_shards': cancel_replica_shards,
}
def main():
    """Parse command-line arguments and dispatch to the chosen command."""
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('host_url')
    parser.add_argument('command', choices=commands.keys())
    parsed = parser.parse_args()
    client = Elasticsearch([{'host': parsed.host_url, 'port': 9200}])
    commands[parsed.command](client)


if __name__ == "__main__":
    main()
| [
"esoergel@gmail.com"
] | esoergel@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.