Column                 Type            Range / values
blob_id                stringlengths   40 .. 40
directory_id           stringlengths   40 .. 40
path                   stringlengths   3 .. 288
content_id             stringlengths   40 .. 40
detected_licenses      listlengths     0 .. 112
license_type           stringclasses   2 values
repo_name              stringlengths   5 .. 115
snapshot_id            stringlengths   40 .. 40
revision_id            stringlengths   40 .. 40
branch_name            stringclasses   684 values
visit_date             timestamp[us]   2015-08-06 10:31:46 .. 2023-09-06 10:44:38
revision_date          timestamp[us]   1970-01-01 02:38:32 .. 2037-05-03 13:00:00
committer_date         timestamp[us]   1970-01-01 02:38:32 .. 2023-09-06 01:08:06
github_id              int64           4.92k .. 681M
star_events_count      int64           0 .. 209k
fork_events_count      int64           0 .. 110k
gha_license_id         stringclasses   22 values
gha_event_created_at   timestamp[us]   2012-06-04 01:52:49 .. 2023-09-14 21:59:50
gha_created_at         timestamp[us]   2008-05-22 07:58:19 .. 2023-08-21 12:35:19
gha_language           stringclasses   147 values
src_encoding           stringclasses   25 values
language               stringclasses   1 value
is_vendor              bool            2 classes
is_generated           bool            2 classes
length_bytes           int64           128 .. 12.7k
extension              stringclasses   142 values
content                stringlengths   128 .. 8.19k
authors                listlengths     1 .. 1
author_id              stringlengths   1 .. 132
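The columns above follow the column / type / min-max layout of a dataset-viewer schema, and each record below lists its field values in that order. As a minimal sketch of how records with this schema might be read (assuming the rows are published as a Hugging Face `datasets`-style dataset; the dataset path below is a hypothetical placeholder, not the real name):

# Minimal sketch, assuming a Hugging Face `datasets`-style dataset.
# "org/python-code-files" is a hypothetical placeholder path, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("org/python-code-files", split="train", streaming=True)

for row in ds:
    # Field names taken from the schema above.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the source file
    break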
bb9b72d22626e0d25fcc15eb9591c80556396118
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02699/s167779272.py
5ae2267cef04e2bb394c6351bcb67a32ba3d4e70
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
149
py
def main():
    S, W = list(map(int, input().split()))
    print("unsafe" if S <= W else "safe")
    pass


if __name__ == '__main__':
    main()
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
6154ea85af213859285844a8e9cbf8f6b018ff55
d37f798101bc6cc795b3ff7e5f9444ff30b4cd83
/kubernetes/client/models/v1alpha1_parent_reference.py
18f7447520f6057f4d3b3f73bc21bd1645ac457f
[ "Apache-2.0" ]
permissive
MorningSong/python
bdd8b9d60b7c2185457fc1bbbc64d098f9682981
ae7b5ddd219fe09b6ed0be715dcca3377a029584
refs/heads/master
2023-08-30T14:41:41.582335
2023-08-23T16:15:28
2023-08-23T16:15:28
139,396,247
0
0
Apache-2.0
2023-09-14T00:11:24
2018-07-02T05:47:43
Python
UTF-8
Python
false
false
6,766
py
# coding: utf-8 """ Kubernetes No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: release-1.27 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from kubernetes.client.configuration import Configuration class V1alpha1ParentReference(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'group': 'str', 'name': 'str', 'namespace': 'str', 'resource': 'str', 'uid': 'str' } attribute_map = { 'group': 'group', 'name': 'name', 'namespace': 'namespace', 'resource': 'resource', 'uid': 'uid' } def __init__(self, group=None, name=None, namespace=None, resource=None, uid=None, local_vars_configuration=None): # noqa: E501 """V1alpha1ParentReference - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._group = None self._name = None self._namespace = None self._resource = None self._uid = None self.discriminator = None if group is not None: self.group = group if name is not None: self.name = name if namespace is not None: self.namespace = namespace if resource is not None: self.resource = resource if uid is not None: self.uid = uid @property def group(self): """Gets the group of this V1alpha1ParentReference. # noqa: E501 Group is the group of the object being referenced. # noqa: E501 :return: The group of this V1alpha1ParentReference. # noqa: E501 :rtype: str """ return self._group @group.setter def group(self, group): """Sets the group of this V1alpha1ParentReference. Group is the group of the object being referenced. # noqa: E501 :param group: The group of this V1alpha1ParentReference. # noqa: E501 :type: str """ self._group = group @property def name(self): """Gets the name of this V1alpha1ParentReference. # noqa: E501 Name is the name of the object being referenced. # noqa: E501 :return: The name of this V1alpha1ParentReference. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this V1alpha1ParentReference. Name is the name of the object being referenced. # noqa: E501 :param name: The name of this V1alpha1ParentReference. # noqa: E501 :type: str """ self._name = name @property def namespace(self): """Gets the namespace of this V1alpha1ParentReference. # noqa: E501 Namespace is the namespace of the object being referenced. # noqa: E501 :return: The namespace of this V1alpha1ParentReference. # noqa: E501 :rtype: str """ return self._namespace @namespace.setter def namespace(self, namespace): """Sets the namespace of this V1alpha1ParentReference. Namespace is the namespace of the object being referenced. # noqa: E501 :param namespace: The namespace of this V1alpha1ParentReference. # noqa: E501 :type: str """ self._namespace = namespace @property def resource(self): """Gets the resource of this V1alpha1ParentReference. # noqa: E501 Resource is the resource of the object being referenced. # noqa: E501 :return: The resource of this V1alpha1ParentReference. 
# noqa: E501 :rtype: str """ return self._resource @resource.setter def resource(self, resource): """Sets the resource of this V1alpha1ParentReference. Resource is the resource of the object being referenced. # noqa: E501 :param resource: The resource of this V1alpha1ParentReference. # noqa: E501 :type: str """ self._resource = resource @property def uid(self): """Gets the uid of this V1alpha1ParentReference. # noqa: E501 UID is the uid of the object being referenced. # noqa: E501 :return: The uid of this V1alpha1ParentReference. # noqa: E501 :rtype: str """ return self._uid @uid.setter def uid(self, uid): """Sets the uid of this V1alpha1ParentReference. UID is the uid of the object being referenced. # noqa: E501 :param uid: The uid of this V1alpha1ParentReference. # noqa: E501 :type: str """ self._uid = uid def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1alpha1ParentReference): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1alpha1ParentReference): return True return self.to_dict() != other.to_dict()
[ "yliao@google.com" ]
yliao@google.com
eafd3efda8542b0d4645d3e90c82145723998525
2fb738f3bdabebf32296150405486377dba7812b
/nuitka/freezer/DependsExe.py
18433052e9e87ad3cdd5729630829cc4c783b534
[ "Apache-2.0", "LicenseRef-scancode-warranty-disclaimer" ]
permissive
goslion/Nuitka
31f62c9083a1eaec104d64eeebc0b9fb50560812
4a30a987b1586271c31822f574ca2584d1107212
refs/heads/master
2023-03-03T08:06:13.420278
2021-02-07T22:06:13
2021-02-07T22:06:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,923
py
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Interface to depends.exe on Windows.

We use depends.exe to investigate needed DLLs of Python DLLs.

"""

from nuitka.Options import assumeYesForDownloads
from nuitka.utils.Download import getCachedDownload
from nuitka.utils.Utils import getArchitecture


def getDependsExePath():
    """Return the path of depends.exe (for Windows).

    Will prompt the user to download if not already cached in AppData
    directory for Nuitka.
    """

    if getArchitecture() == "x86":
        depends_url = "http://dependencywalker.com/depends22_x86.zip"
    else:
        depends_url = "http://dependencywalker.com/depends22_x64.zip"

    return getCachedDownload(
        url=depends_url,
        is_arch_specific=True,
        binary="depends.exe",
        flatten=True,
        specifity="",  # Note: If there ever was an update, put version here.
        message="""\
Nuitka will make use of Dependency Walker (http://dependencywalker.com) tool
to analyze the dependencies of Python extension modules.""",
        reject="Nuitka does not work in --standalone on Windows without.",
        assume_yes_for_downloads=assumeYesForDownloads(),
    )
[ "kay.hayen@gmail.com" ]
kay.hayen@gmail.com
6e90ef1077bbefd52a47c39eaf3d32fe9090c6d7
99c4d4a6592fded0e8e59652484ab226ac0bd38c
/code/batch-2/vse-naloge-brez-testov/DN13-Z-077.py
bcccc875f9d0ca0da1cc8bf82548e3362986f142
[]
no_license
benquick123/code-profiling
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
0d496d649247776d121683d10019ec2a7cba574c
refs/heads/master
2021-10-08T02:53:50.107036
2018-12-06T22:56:38
2018-12-06T22:56:38
126,011,752
0
0
null
null
null
null
UTF-8
Python
false
false
1,260
py
import itertools


class Minobot:
    def __init__(self):
        self.x = 0
        self.y = 0
        self.smer = "desno"

    def naprej(self, premik):
        if (self.smer == "desno"):
            self.x += premik
        elif (self.smer == "levo"):
            self.x -= premik
        elif (self.smer == "gor"):
            self.y += premik
        elif (self.smer == "dol"):
            self.y -= premik

    def koordinate(self):
        return self.x, self.y

    def desno(self):
        if (self.smer == "desno"):
            self.smer = "dol"
        elif (self.smer == "dol"):
            self.smer = "levo"
        elif (self.smer == "levo"):
            self.smer = "gor"
        elif (self.smer == "gor"):
            self.smer = "desno"

    def levo(self):
        if (self.smer == "desno"):
            self.smer = "gor"
        elif (self.smer == "gor"):
            self.smer = "levo"
        elif (self.smer == "levo"):
            self.smer = "dol"
        elif (self.smer == "dol"):
            self.smer = "desno"

    def razdalja(self):
        return abs(self.x) + abs(self.y)


a = Minobot()
a.levo()
a.naprej(4)
a.desno()
a.naprej(3)
#print(a.koordinate())
[ "benjamin.fele@gmail.com" ]
benjamin.fele@gmail.com
80737f45f33bac9e5445d7f37314f4c3515006f4
9e41cd05ee3d36d09e2dfb49af8212c3aee3cd61
/kisházik/classification_EMP2B5.py
c2e3885645ae26b3c237756509ab3bd62b09723f
[]
no_license
matech96/ADA
2cf60eeacb0cdf95ce8486169ddd9e4e1bb2311f
b15c8e339291014af13e03cd3a099e1914198ff9
refs/heads/master
2020-09-14T18:02:05.271191
2020-01-28T16:04:29
2020-01-28T16:04:29
223,208,311
0
0
null
null
null
null
UTF-8
Python
false
false
2,269
py
#!/usr/bin/env python
# coding: utf-8

# In[1]:

from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, roc_auc_score, precision_score, recall_score
import pandas as pd
from sklearn.linear_model import LinearRegression


# In[2]:

df = pd.read_csv('../data/DataSet_Hitelbiralat_preprocessed.csv')


# In[3]:

def rounding_score_decorator(score):
    return lambda y_true, y_pred: score(y_true, y_pred > 0.5)

def text2score(optimalization):
    if optimalization == 'AUC':
        score = roc_auc_score
    elif optimalization == 'Precision':
        score = rounding_score_decorator(precision_score)
    elif optimalization == 'Recall':
        score = rounding_score_decorator(recall_score)
    elif optimalization == 'Accuracy':
        score = rounding_score_decorator(accuracy_score)
    return score

def modell_evaluator(data, input_attributes, target_attribute, model, optimalization):
    score = text2score(optimalization)
    split_idx = len(df) // 2
    data_train = data[:split_idx]
    data_test = data[split_idx:]

    def test_attributes(fix_input, possible_inputs):
        best_score = -1
        best_input = None
        for possible_input in possible_inputs:
            model.fit(data_train[fix_input + [possible_input]], data_train[target_attribute])
            predicted = model.predict(data_test[fix_input + [possible_input]])
            s = score(data_test[target_attribute], predicted)
            if s > best_score:
                best_score = s
                best_input = possible_input
        return best_input, best_score

    good_inputs = []
    in_race_inputs = input_attributes
    best_s = -1
    while len(in_race_inputs):
        i_to_accept, s = test_attributes([], input_attributes)
        if s < best_s:
            return best_s, good_inputs
        best_s = s
        good_inputs.append(i_to_accept)
        in_race_inputs.remove(i_to_accept)
    return best_s, good_inputs


# In[4]:

i = df.columns.to_list()
i.remove('TARGET_LABEL_BAD')
modell_evaluator(df, i,
                 #['Sex', 'Age', 'MONTHS_IN_THE_JOB', 'PERSONAL_NET_INCOME', 'PAYMENT_DAY'],
                 'TARGET_LABEL_BAD',
                 LinearRegression(),
                 'AUC')
[ "gangoly96@gmail.com" ]
gangoly96@gmail.com
1d912dceae386ef74247aae0ce3c3d92d2ee8ed8
e3c8f786d09e311d6ea1cab50edde040bf1ea988
/Incident-Response/Tools/grr/grr/server/grr_response_server/gui/api_plugins/reflection_test.py
bbc604dfacc9a8f3d4e2f1ef5f8c6585f034152e
[ "MIT", "Apache-2.0" ]
permissive
foss2cyber/Incident-Playbook
d1add8aec6e28a19e515754c6ce2e524d67f368e
a379a134c0c5af14df4ed2afa066c1626506b754
refs/heads/main
2023-06-07T09:16:27.876561
2021-07-07T03:48:54
2021-07-07T03:48:54
384,988,036
1
0
MIT
2021-07-11T15:45:31
2021-07-11T15:45:31
null
UTF-8
Python
false
false
3,974
py
#!/usr/bin/env python
"""This module contains tests for reflection API handlers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from absl import app

from grr_response_server.gui import api_call_router
from grr_response_server.gui import api_test_lib
from grr_response_server.gui.api_plugins import reflection as reflection_plugin
from grr.test_lib import test_lib


class ApiGetRDFValueDescriptorHandlerTest(api_test_lib.ApiCallHandlerTest):
  """Test for ApiGetRDFValueDescriptorHandler."""

  def testSuccessfullyRendersReflectionDataForAllTypes(self):
    result = reflection_plugin.ApiListRDFValuesDescriptorsHandler().Handle(
        None, context=self.context)
    # TODO(user): enhance this test.
    self.assertTrue(result)


class DummyApiCallRouter(api_call_router.ApiCallRouter):
  """Dummy ApiCallRouter implementation overriding just 1 method."""

  @api_call_router.Http("GET", "/api/method1")
  @api_call_router.ArgsType(api_test_lib.SampleGetHandlerArgs)
  def SomeRandomMethodWithArgsType(self, args, context=None):
    """Doc 1."""

  @api_call_router.Http("GET", "/api/method2")
  @api_call_router.ResultType(api_test_lib.SampleGetHandlerArgs)
  def SomeRandomMethodWithResultType(self, args, context=None):
    """Doc 2."""

  @api_call_router.Http("GET", "/api/method3")
  @api_call_router.ArgsType(api_test_lib.SampleGetHandlerArgs)
  @api_call_router.ResultType(api_test_lib.SampleGetHandlerArgs)
  def SomeRandomMethodWithArgsTypeAndResultType(self, args, context=None):
    """Doc 3."""


class ApiListApiMethodsHandlerTest(api_test_lib.ApiCallHandlerTest):
  """Test for ApiListApiMethodsHandler."""

  def setUp(self):
    super().setUp()
    self.router = DummyApiCallRouter()
    self.handler = reflection_plugin.ApiListApiMethodsHandler(self.router)

  def testRendersMethodWithArgsCorrectly(self):
    result = self.handler.Handle(None, context=self.context)

    method = [
        item for item in result.items
        if item.name == "SomeRandomMethodWithArgsType"
    ][0]
    self.assertEqual(method.doc, "Doc 1.")

    self.assertEqual(method.args_type_descriptor.name, "SampleGetHandlerArgs")
    self.assertEqual(
        method.args_type_descriptor.AsPrimitiveProto().default.type_url,
        "type.googleapis.com/grr.SampleGetHandlerArgs")

    self.assertEqual(method.result_kind, "NONE")
    self.assertFalse(method.HasField("result_type"))

  def testRendersMethodWithResultTypeCorrectly(self):
    result = self.handler.Handle(None, context=self.context)

    method = [
        item for item in result.items
        if item.name == "SomeRandomMethodWithResultType"
    ][0]
    self.assertEqual(method.doc, "Doc 2.")

    self.assertFalse(method.HasField("args_type"))

    self.assertEqual(method.result_kind, "VALUE")
    self.assertEqual(method.result_type_descriptor.name, "SampleGetHandlerArgs")
    self.assertEqual(
        method.result_type_descriptor.AsPrimitiveProto().default.type_url,
        "type.googleapis.com/grr.SampleGetHandlerArgs")

  def testRendersMethodWithArgsTypeAndResultTypeCorrectly(self):
    result = self.handler.Handle(None, context=self.context)

    method = [
        item for item in result.items
        if item.name == "SomeRandomMethodWithArgsTypeAndResultType"
    ][0]
    self.assertEqual(method.doc, "Doc 3.")

    self.assertEqual(method.args_type_descriptor.name, "SampleGetHandlerArgs")
    self.assertEqual(
        method.args_type_descriptor.AsPrimitiveProto().default.type_url,
        "type.googleapis.com/grr.SampleGetHandlerArgs")

    self.assertEqual(method.result_kind, "VALUE")
    self.assertEqual(method.result_type_descriptor.name, "SampleGetHandlerArgs")
    self.assertEqual(
        method.result_type_descriptor.AsPrimitiveProto().default.type_url,
        "type.googleapis.com/grr.SampleGetHandlerArgs")


def main(argv):
  test_lib.main(argv)


if __name__ == "__main__":
  app.run(main)
[ "a.songer@protonmail.com" ]
a.songer@protonmail.com
73d467d6ab8185e9e67e75cb05f4ebc9019517a1
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
/lib/python2.7/site-packages/openopt/__init__.py
76070a202bb544deddb6eb445f4f4fae6c2a6b53
[ "Python-2.0", "Apache-2.0", "BSD-3-Clause", "LicenseRef-scancode-unknown" ]
permissive
wangyum/Anaconda
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
refs/heads/master
2022-10-21T15:14:23.464126
2022-10-05T12:10:31
2022-10-05T12:10:31
76,526,728
11
10
Apache-2.0
2022-10-05T12:10:32
2016-12-15T05:26:12
Python
UTF-8
Python
false
false
1,119
py
#! /usr/bin/env python
#from .ooVersionNumber import __version__
import os, sys

curr_dir = ''.join([elem + os.sep for elem in __file__.split(os.sep)[:-1]])
sys.path += [curr_dir, curr_dir + 'kernel']

from ooVersionNumber import __version__
from oo import *

#from kernel.GUI import manage
#from kernel.oologfcn import OpenOptException
#from kernel.nonOptMisc import oosolver
from GUI import manage
from oologfcn import OpenOptException
from nonOptMisc import oosolver
from mfa import MFA

isE = False
try:
    import enthought
    isE = True
except ImportError:
    pass
try:
    import envisage
    import mayavi
    isE = True
except ImportError:
    pass
try:
    import xy
    isE = False
except ImportError:
    pass
if isE:
    s = """
    Seems like you are using OpenOpt from commercial Enthought Python Distribution;
    consider using free GPL-licensed alternatives
    PythonXY (http://www.pythonxy.com) or Sage (http://sagemath.org) instead.
    """
    print(s)

#__all__ = filter(lambda s:not s.startswith('_'),dir())

#from numpy.testing import NumpyTest
#test = NumpyTest().test
[ "noreply@github.com" ]
wangyum.noreply@github.com
ccf7ef2d3e547fc5865b9d05d078122acb39a3a0
9aaa39f200ee6a14d7d432ef6a3ee9795163ebed
/Algorithm/Python/624. Maximum Distance in Arrays.py
a024e9db5b888db8f0029cd55f4da18dc8085909
[]
no_license
WuLC/LeetCode
47e1c351852d86c64595a083e7818ecde4131cb3
ee79d3437cf47b26a4bca0ec798dc54d7b623453
refs/heads/master
2023-07-07T18:29:29.110931
2023-07-02T04:31:00
2023-07-02T04:31:00
54,354,616
29
16
null
null
null
null
UTF-8
Python
false
false
799
py
# -*- coding: utf-8 -*-
# @Author: LC
# @Date: 2017-06-18 16:34:58
# @Last modified by: LC
# @Last Modified time: 2017-06-18 16:38:28
# @Email: liangchaowu5@gmail.com

# O(n) time
# traverse the arrays,
# keep the min number and max number among the traversed numbers so far and compare them with the current number


class Solution(object):
    def maxDistance(self, arrays):
        """
        :type arrays: List[List[int]]
        :rtype: int
        """
        result = 0
        curr_min, curr_max = arrays[0][0], arrays[0][-1]
        for i in xrange(1, len(arrays)):
            result = max(result, abs(arrays[i][0] - curr_max), abs(arrays[i][-1] - curr_min))
            curr_max = max(curr_max, arrays[i][-1])
            curr_min = min(curr_min, arrays[i][0])
        return result
[ "liangchaowu5@gmail.com" ]
liangchaowu5@gmail.com
ebb8c77391a9e3bd64b8b627a3638e7999db0425
c698fb03aa2bf034904a0310931b473b6da66fdc
/com/study/algorithm/daily/73. Set Matrix Zeroes.py
72c686e73159edddb22b711340fc520e9b884642
[]
no_license
pi408637535/Algorithm
e46df1d07a519ab110e4f97755f461a1b2b7c308
75f4056ec6da01f7466a272871a7f7db579166b4
refs/heads/master
2021-08-29T19:19:53.368953
2021-08-22T16:30:32
2021-08-22T16:30:32
213,289,503
1
0
null
null
null
null
UTF-8
Python
false
false
2,815
py
from typing import List


class Solution:
    def setZeroes(self, matrix: List[List[int]]) -> None:
        """
        Do not return anything, modify matrix in-place instead.
        """
        if not matrix or not matrix:
            return
        m, n = len(matrix), len(matrix[0])
        flag = [[False] * n for i in range(m)]

        def help(i, j):
            flag[i][j] = True
            down_i = i + 1
            while down_i < m:
                if matrix[down_i][j]:
                    flag[down_i][j] = True
                    matrix[down_i][j] = 0
                down_i += 1
            up_i = i - 1
            while up_i >= 0:
                if matrix[up_i][j]:
                    flag[up_i][j] = True
                    matrix[up_i][j] = 0
                up_i -= 1
            left_j = j - 1
            while left_j >= 0:
                if matrix[i][left_j]:
                    flag[i][left_j] = True
                    matrix[i][left_j] = 0
                left_j -= 1
            right_j = j + 1
            while right_j < n:
                if matrix[i][right_j]:
                    flag[i][right_j] = True
                    matrix[i][right_j] = 0
                right_j += 1

        for i in range(m):
            for j in range(n):
                if matrix[i][j] == 0 and not flag[i][j]:
                    help(i, j)


import copy


class Solution:
    def setZeroes(self, matrix: List[List[int]]) -> None:
        """
        Do not return anything, modify matrix in-place instead.
        """
        if not matrix or not matrix:
            return
        m, n = len(matrix), len(matrix[0])
        matrix_copy = copy.deepcopy(matrix)
        for i in range(m):
            for j in range(n):
                if matrix_copy[i][j] == 0:
                    for k in range(m):
                        matrix[k][j] = 0
                    for k in range(n):
                        matrix[i][k] = 0


# space complexity (reduced to O(m + n) marker arrays)
class Solution:
    def setZeroes(self, matrix: List[List[int]]) -> None:
        """
        Do not return anything, modify matrix in-place instead.
        """
        if not matrix or not matrix:
            return
        m, n = len(matrix), len(matrix[0])
        rows, columns = [False] * m, [False] * n
        for i in range(m):
            for j in range(n):
                if not matrix[i][j]:
                    rows[i] = columns[j] = True
        for i in range(m):
            for j in range(n):
                if rows[i] or columns[j]:
                    matrix[i][j] = 0


if __name__ == '__main__':
    matrix = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]
    matrix = [[0,1,2,0],[3,4,5,2],[1,3,1,5]]
    matrix = [[1, 2, 3, 4], [5, 0, 7, 8], [0, 10, 11, 12], [13, 14, 15, 0]]
    Solution().setZeroes(matrix)
    print(matrix)
[ "piguanghua@163.com" ]
piguanghua@163.com
ea22858c3a3da17f4d8b8806cde34a8f102b33f5
f4b60f5e49baf60976987946c20a8ebca4880602
/lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/fvns/addrinst.py
45a74f921e5aabecaaba2f160e60df1783f16095
[]
no_license
cqbomb/qytang_aci
12e508d54d9f774b537c33563762e694783d6ba8
a7fab9d6cda7fadcc995672e55c0ef7e7187696e
refs/heads/master
2022-12-21T13:30:05.240231
2018-12-04T01:46:53
2018-12-04T01:46:53
159,911,666
0
0
null
2022-12-07T23:53:02
2018-12-01T05:17:50
Python
UTF-8
Python
false
false
6,765
py
# coding=UTF-8 # ********************************************************************** # Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved # written by zen warriors, do not modify! # ********************************************************************** from cobra.mit.meta import ClassMeta from cobra.mit.meta import StatsClassMeta from cobra.mit.meta import CounterMeta from cobra.mit.meta import PropMeta from cobra.mit.meta import Category from cobra.mit.meta import SourceRelationMeta from cobra.mit.meta import NamedSourceRelationMeta from cobra.mit.meta import TargetRelationMeta from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory from cobra.model.category import MoCategory, PropCategory, CounterCategory from cobra.mit.mo import Mo # ################################################## class AddrInst(Mo): """ The IP address namespace/IP address range contains unicast and multicast address blocks. """ meta = ClassMeta("cobra.model.fvns.AddrInst") meta.moClassName = "fvnsAddrInst" meta.rnFormat = "addrinst-%(name)s" meta.category = MoCategory.REGULAR meta.label = "IP Address Pool" meta.writeAccessMask = 0x2001 meta.readAccessMask = 0x900000002001 meta.isDomainable = False meta.isReadOnly = False meta.isConfigurable = True meta.isDeletable = True meta.isContextRoot = False meta.childClasses.add("cobra.model.fvns.RtAddrInst") meta.childClasses.add("cobra.model.fvns.UcastAddrBlk") meta.childClasses.add("cobra.model.fvns.RtVipAddrNs") meta.childClasses.add("cobra.model.fault.Delegate") meta.childNamesAndRnPrefix.append(("cobra.model.fvns.RtVipAddrNs", "rtinfraVipAddrNs-")) meta.childNamesAndRnPrefix.append(("cobra.model.fvns.RtAddrInst", "rtmgmtAddrInst-")) meta.childNamesAndRnPrefix.append(("cobra.model.fvns.UcastAddrBlk", "fromaddr-")) meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-")) meta.parentClasses.add("cobra.model.fv.Tenant") meta.superClasses.add("cobra.model.naming.NamedObject") meta.superClasses.add("cobra.model.pol.Obj") meta.superClasses.add("cobra.model.pol.Ns") meta.superClasses.add("cobra.model.fvns.AAddrInstP") meta.superClasses.add("cobra.model.pol.Def") meta.rnPrefixes = [ ('addrinst-', True), ] prop = PropMeta("str", "addr", "addr", 4962, PropCategory.REGULAR) prop.label = "IP Address" prop.isConfig = True prop.isAdmin = True meta.props.add("addr", prop) prop = PropMeta("str", "addrType", "addrType", 19828, PropCategory.REGULAR) prop.label = "Address Type" prop.isConfig = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "regular" prop._addConstant("regular", "regular", 0) prop._addConstant("vip_range", "vip_range", 1) meta.props.add("addrType", prop) prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop._addConstant("deleteAll", "deleteall", 16384) prop._addConstant("deleteNonPresent", "deletenonpresent", 8192) prop._addConstant("ignore", "ignore", 4096) meta.props.add("childAction", prop) prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR) prop.label = "Description" prop.isConfig = True prop.isAdmin = True prop.range = [(0, 128)] prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+'] meta.props.add("descr", prop) prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN) prop.label = "None" prop.isDn = True prop.isImplicit = True prop.isAdmin = True prop.isCreateOnly = True meta.props.add("dn", prop) prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR) prop.label = "None" 
prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "local" prop._addConstant("implicit", "implicit", 4) prop._addConstant("local", "local", 0) prop._addConstant("policy", "policy", 1) prop._addConstant("replica", "replica", 2) prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3) meta.props.add("lcOwn", prop) prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "never" prop._addConstant("never", "never", 0) meta.props.add("modTs", prop) prop = PropMeta("str", "name", "name", 6566, PropCategory.REGULAR) prop.label = "Name" prop.isConfig = True prop.isAdmin = True prop.isCreateOnly = True prop.isNaming = True prop.range = [(1, 64)] prop.regex = ['[a-zA-Z0-9_.:-]+'] meta.props.add("name", prop) prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR) prop.label = "None" prop.isConfig = True prop.isAdmin = True prop.range = [(0, 128)] prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+'] meta.props.add("ownerKey", prop) prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR) prop.label = "None" prop.isConfig = True prop.isAdmin = True prop.range = [(0, 64)] prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+'] meta.props.add("ownerTag", prop) prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN) prop.label = "None" prop.isRn = True prop.isImplicit = True prop.isAdmin = True prop.isCreateOnly = True meta.props.add("rn", prop) prop = PropMeta("str", "skipGwVal", "skipGwVal", 16373, PropCategory.REGULAR) prop.label = "Skip GW Validation" prop.isConfig = True prop.isAdmin = True prop.defaultValue = False prop.defaultValueStr = "no" prop._addConstant("no", None, False) prop._addConstant("yes", None, True) meta.props.add("skipGwVal", prop) prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop._addConstant("created", "created", 2) prop._addConstant("deleted", "deleted", 8) prop._addConstant("modified", "modified", 4) meta.props.add("status", prop) prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR) prop.label = "None" prop.isImplicit = True prop.isAdmin = True meta.props.add("uid", prop) meta.namingProps.append(getattr(meta.props, "name")) def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps): namingVals = [name] Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps) # End of package file # ##################################################
[ "collinsctk@qytang.com" ]
collinsctk@qytang.com
3f518d1941b93c0126a47ddb4ac3959257e6c413
a830f67a97103b750ed2ced5997285532762f25d
/test_dot_env/test_dot_env/tests.py
448aa445bfd50deca27632fa9e4bcd5b268942e8
[]
no_license
Durant21/test_dot_env
308d29ebbdafa24306cd89e02079d0adbb017fd7
40f6894ff7adc91c9870c96bbd44d62410e1eeb0
refs/heads/master
2022-12-21T00:04:56.754078
2019-10-02T18:59:38
2019-10-02T18:59:38
212,382,378
0
1
null
null
null
null
UTF-8
Python
false
false
717
py
import unittest

from pyramid import testing


class ViewTests(unittest.TestCase):
    def setUp(self):
        self.config = testing.setUp()

    def tearDown(self):
        testing.tearDown()

    def test_my_view(self):
        from .views.default import my_view
        request = testing.DummyRequest()
        info = my_view(request)
        self.assertEqual(info['project'], 'test_dot_env')


class FunctionalTests(unittest.TestCase):
    def setUp(self):
        from test_dot_env import main
        app = main({})
        from webtest import TestApp
        self.testapp = TestApp(app)

    def test_root(self):
        res = self.testapp.get('/', status=200)
        self.assertTrue(b'Pyramid' in res.body)
[ "durant.crimson@icloud.com" ]
durant.crimson@icloud.com
8b5bea4bf2920bf639d60e870173c108a9782dd6
0c78c4356f9df3a5c28adc2bdab7bad750b49d35
/setup.py
37220f5e42a00c2a7f2a4252bd8d8fed9ffad6a0
[ "MIT" ]
permissive
julianblue/agoro-field-boundary-detector
b985513475f4f32973b88d965ed5586d74ecbb01
9dd911df096ce865471ed0330174044f4172cc66
refs/heads/master
2023-06-06T16:58:46.177772
2021-06-22T16:13:27
2021-06-22T16:13:27
null
0
0
null
null
null
null
UTF-8
Python
false
false
986
py
"""Setup module for this Python package.""" import pathlib from setuptools import find_packages, setup # The directory containing this file HERE = pathlib.Path(__file__).parent # The text of the README file README = (HERE / "README.md").read_text() INSTALL_REQUIRES = [ "tqdm", "torch~=1.8.1", "torchvision~=0.9.1", "pycocotools~=2.0.2", "earthengine-api~=0.1.267", "opencv-python~=4.5.2.52", ] setup( name="agoro_field_boundary_detector", version="0.1.1", description="Detect field boundaries using satellite imagery.", long_description=README, long_description_content_type="text/markdown", url="https://github.com/radix-ai/agoro-field-boundary-detector", author="Radix", author_email="developers@radix.ai", package_dir={"": "src"}, packages=find_packages(where="src", exclude=("data", "models", "notebooks", "tasks")), license="LICENSE", install_requires=INSTALL_REQUIRES, include_package_data=True, )
[ "broekxruben@gmail.com" ]
broekxruben@gmail.com
68f0f8c2e5a4740c22cfcd37baf82be5fea82e65
30e1dc84fe8c54d26ef4a1aff000a83af6f612be
/deps/src/libxml2-2.9.1/python/tests/validDTD.py
ee35c067b3a24970df32163eaa39c77880183bb3
[ "BSD-3-Clause", "MIT" ]
permissive
Sitispeaks/turicreate
0bda7c21ee97f5ae7dc09502f6a72abcb729536d
d42280b16cb466a608e7e723d8edfbe5977253b6
refs/heads/main
2023-05-19T17:55:21.938724
2021-06-14T17:53:17
2021-06-14T17:53:17
385,034,849
1
0
BSD-3-Clause
2021-07-11T19:23:21
2021-07-11T19:23:20
null
UTF-8
Python
false
false
1,224
py
#!/usr/bin/python -u
import libxml2
import sys

ARG = 'test string'

class ErrorHandler:

    def __init__(self):
        self.errors = []

    def handler(self, msg, data):
        if data != ARG:
            raise Exception("Error handler did not receive correct argument")
        self.errors.append(msg)

# Memory debug specific
libxml2.debugMemory(1)

dtd = """<!ELEMENT foo EMPTY>"""
valid = """<?xml version="1.0"?>
<foo></foo>"""
invalid = """<?xml version="1.0"?>
<foo><bar/></foo>"""

dtd = libxml2.parseDTD(None, 'test.dtd')
ctxt = libxml2.newValidCtxt()
e = ErrorHandler()
ctxt.setValidityErrorHandler(e.handler, e.handler, ARG)

# Test valid document
doc = libxml2.parseDoc(valid)
ret = doc.validateDtd(ctxt, dtd)
if ret != 1 or e.errors:
    print("error doing DTD validation")
    sys.exit(1)
doc.freeDoc()

# Test invalid document
doc = libxml2.parseDoc(invalid)
ret = doc.validateDtd(ctxt, dtd)
if ret != 0 or not e.errors:
    print("Error: document supposed to be invalid")
doc.freeDoc()

dtd.freeDtd()
del dtd
del ctxt

# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print("OK")
else:
    print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
    libxml2.dumpMemory()
[ "znation@apple.com" ]
znation@apple.com
e55d95c481ad73ca8bf90ae9a403979a13a469b0
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03821/s969389449.py
aee420fedd1c2214ea27397c8853912146d541c1
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
156
py
N, *AB = [map(int, s.split()) for s in open(0)]
AB = list(AB)[::-1]
bias = 0
for A, B in AB:
    bias += (bias + A + B - 1) // B * B - A - bias
print(bias)
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
f58cc5da39483f3d604be5366b4693ca0e122b4c
b38fb62950582664158327a2abf29c84cc59178b
/0x02-python-import_modules/2-args.py
34615c54c5ce8850ce044cc4761e27557383568d
[]
no_license
MiguelCF06/holbertonschool-higher_level_programming
a39129cf355abe15e2caeb41cdef385ace53cfda
0bc44343cb20c97221d3886bafda6db7235bc13a
refs/heads/master
2022-12-18T00:12:52.498624
2020-09-24T17:00:24
2020-09-24T17:00:24
259,323,305
1
2
null
null
null
null
UTF-8
Python
false
false
444
py
#!/usr/bin/python3
if __name__ == "__main__":
    from sys import argv
    if (len(argv)-1 == 0):
        print("0 arguments.")
    elif (len(argv)-1 == 1):
        print("{} argument:".format(len(argv) - 1))
        print("{}: {}".format(len(argv)-1, argv[1]))
    else:
        j = 1
        print("{} arguments:".format(len(argv) - 1))
        while j <= len(argv)-1:
            print("{:d}: {}".format(j, argv[j]))
            j = j + 1
[ "miguel.cipamocha@gmail.com" ]
miguel.cipamocha@gmail.com
e21afb3557c986e0856f76cc979a03e2b8372c33
d1aa6e7d5631d7806531660febbd1f856eaeece7
/python/paddle/utils/op_version.py
575e5f40772eb08ea2c79d4ac73d7d04c5f9cfbf
[ "Apache-2.0" ]
permissive
gongweibao/Paddle
510cd4bc0ef89bc6ccee7b6b8eca52c00e014b77
60f9c60cd8196c66c391d79c35d341e9072f8838
refs/heads/develop
2023-03-13T17:43:35.675875
2022-09-20T08:46:15
2022-09-20T08:46:15
82,279,237
3
2
Apache-2.0
2021-05-26T06:17:43
2017-02-17T09:16:16
Python
UTF-8
Python
false
false
2,306
py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..fluid import core

__all__ = []


def Singleton(cls):
    _instance = {}

    def _singleton(*args, **kargs):
        if cls not in _instance:
            _instance[cls] = cls(*args, **kargs)
        return _instance[cls]

    return _singleton


class OpUpdateInfoHelper(object):
    def __init__(self, info):
        self._info = info

    def verify_key_value(self, name=''):
        result = False
        key_funcs = {
            core.OpAttrInfo: 'name',
            core.OpInputOutputInfo: 'name',
        }
        if name == '':
            result = True
        elif type(self._info) in key_funcs:
            if getattr(self._info, key_funcs[type(self._info)])() == name:
                result = True
        return result


@Singleton
class OpLastCheckpointChecker(object):
    def __init__(self):
        self.raw_version_map = core.get_op_version_map()
        self.checkpoints_map = {}
        self._construct_map()

    def _construct_map(self):
        for op_name in self.raw_version_map:
            last_checkpoint = self.raw_version_map[op_name].checkpoints()[-1]
            infos = last_checkpoint.version_desc().infos()
            self.checkpoints_map[op_name] = infos

    def filter_updates(self, op_name, type=core.OpUpdateType.kInvalid, key=''):
        updates = []
        if op_name in self.checkpoints_map:
            for update in self.checkpoints_map[op_name]:
                if (update.type() == type) or (type == core.OpUpdateType.kInvalid):
                    if OpUpdateInfoHelper(update.info()).verify_key_value(key):
                        updates.append(update.info())
        return updates
[ "noreply@github.com" ]
gongweibao.noreply@github.com
fd4c49e440c3a33e97213c80f5a63d98a62df18e
6e800b3513537622df14bb598abe9c051116106c
/jianzhioffer/21Exchange.py
dc6f729dfe73659af2d56f13c6281d19f196046d
[]
no_license
Huxhh/LeetCodePy
fd72f03193d1f0b58c44bffc46a9a59ba9714215
6a99e84c5742ca68012b14da362f6c3255e10b21
refs/heads/master
2023-06-09T09:23:54.209025
2023-05-31T16:29:03
2023-05-31T16:29:03
148,866,001
0
0
null
null
null
null
UTF-8
Python
false
false
627
py
# coding=utf-8
# author huxh
# time 2020/3/24 10:44 AM


def exchange(nums):
    if not nums:
        return []
    l = 0
    r = len(nums) - 1
    while l < r:
        while l < r and nums[l] & 1:
            l += 1
        while l < r and not nums[r] & 1:
            r -= 1
        nums[l], nums[r] = nums[r], nums[l]
    return nums


def exchange2(nums):
    if not nums:
        return []
    l = 0
    r = 0
    while r < len(nums):
        if nums[r] & 1:
            nums[r], nums[l] = nums[l], nums[r]
            l += 1
        r += 1
    return nums


if __name__ == '__main__':
    print(exchange2([1,3,4,6,7,9]))
[ "563255387@qq.com" ]
563255387@qq.com
d6851302274970ef6f014533abcffa0f53972792
9c35adeaa3c73f4d49af6cbe64a63cce1957475a
/views/room.py
96e8f91783de065c5bf97729497c81bd417f7e95
[]
no_license
longfeilove7/ClusterManager
a6e275cee8e5381019d539baef184cdb5ac4f078
d2f8a973c2ddcd75395916974d733f6cfd5346a9
refs/heads/master
2020-03-16T22:58:50.085678
2019-01-03T01:17:47
2019-01-03T01:17:47
133,060,028
2
0
null
null
null
null
UTF-8
Python
false
false
6,903
py
""" 命名规范:module_name, package_name, ClassName, method_name, ExceptionName, function_name, GLOBAL_VAR_NAME, instance_var_name, function_parameter_name, local_var_name. """ from rest_framework_swagger.views import get_swagger_view from django.db.models import Count, Max, Avg, Min, Sum, F, Q, FloatField from django.db import models from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from django.shortcuts import render, redirect from django.shortcuts import HttpResponse from django.http import HttpRequest, HttpResponseBadRequest from HostManager import models from django_celery_beat.models import PeriodicTask from django_celery_beat.models import PeriodicTasks from django_celery_beat.models import CrontabSchedule from django_celery_beat.models import IntervalSchedule from django_celery_beat.models import SolarSchedule from django_celery_results.models import TaskResult from celery import shared_task from celery import task from HostManager import tasks from celery import Celery from celery.schedules import crontab from celery import app from django.views.decorators.csrf import csrf_protect from django.views.decorators.csrf import csrf_exempt import json import datetime import pytz from django.utils import timezone from itertools import chain #import django_excel as excel from HostManager.models import Question, Choice, Host, Rooms from django import forms # json can't service datetime format,so use the djangojsonencoder from django.core.serializers import serialize from django.core.serializers.json import DjangoJSONEncoder from decimal import * #import os, sys, commands import xmlrpc.server import xmlrpc.client from django.contrib.auth.decorators import login_required # Create your views here. class ClassRoom: @login_required def addRoom(request): if request.method == 'GET': room_list = models.Rooms.objects.all() return render(request, 'add_room.html', {'room_list': room_list}) elif request.method == 'POST': roomName = request.POST.get('roomName') cabinetNumber = request.POST.get('cabinetNumber') floor = request.POST.get('floor') roomArea = request.POST.get('roomArea') models.Rooms.objects.create( roomName=roomName, cabinetNumber=cabinetNumber, floor=floor, roomArea=roomArea) return redirect('/add_room/') @login_required def roomInfoQuery(request): info_list = models.Rooms.objects.all() limit = request.GET.get('limit') # how many items per page #print("the limit :"+limit) offset = request.GET.get('offset') # how many items in total in the DB #print("the offset :",offset) sort_column = request.GET.get('sort') # which column need to sort search = request.GET.get('search') if sort_column: print("the sort_column :" + sort_column) order = request.GET.get('order') # ascending or descending print("the order :" + order) if order == "asc": info_list = models.Rooms.objects.order_by(sort_column) else: info_list = models.Rooms.objects.order_by("-" + sort_column) print(info_list) elif search: # 判断是否有搜索字 info_list = models.Rooms.objects.filter( Q(id__icontains=search) | Q(roomName__icontains=search) | Q(cabinetNumber__icontains=search) | Q(floor__icontains=search) | Q(roomArea__icontains=search)) else: info_list = models.Rooms.objects.all( ) # must be wirte the line code here info_list_count = len(info_list) print(info_list_count) if not offset: offset = 0 if not limit: limit = 10 # 默认是每页20行的内容,与前端默认行数一致 pageinator = Paginator(info_list, limit) # 利用Django的Painator开始做分页 page = int(int(offset) / int(limit) + 1) print("the page:", page) info_list_dict = { "total": info_list_count, "rows": 
[] } # 必须带有rows和total这2个key,total表示总数,rows表示每行的内容 for item in pageinator.page(page): info_list_dict['rows'].append({ "id": item.id, "roomName": item.roomName, "cabinetNumber": item.cabinetNumber, "floor": item.floor, "roomArea": item.roomArea }) info_list_json = json.dumps(info_list_dict) return HttpResponse( info_list_json, content_type="application/json", ) @login_required def roomEdit(request, nid): if request.method == 'POST': roomName = request.POST.get('roomName') cabinetNumber = request.POST.get('cabinetNumber') floor = request.POST.get('floor') roomArea = request.POST.get('roomArea') models.Rooms.objects.filter(id=nid).update( roomName=roomName, cabinetNumber=cabinetNumber, floor=floor, roomArea=roomArea) print(roomName) return redirect('/add_room/') @login_required def roomDelete(request): if request.method == 'POST': ipmiID = request.POST.get('allValue') obj = models.Host.objects.filter(roomName_id=ipmiID).first() if obj: dictDelete = [ipmiID, 0] else: models.Rooms.objects.filter(id=ipmiID).delete() dictDelete = [ipmiID, 1] data = json.dumps(dictDelete).encode() return HttpResponse(data) @login_required def batchRoomDelete(request): """""" context = {} if request.method == 'POST': allValue = request.POST.get('allValue') print("the allValue: ", allValue, type(allValue)) listAllValue = json.loads(allValue) print("the listAllValue: ", listAllValue, type(listAllValue)) listDelete = [] for dictAllValue in listAllValue: print(type(dictAllValue)) ipmiID = dictAllValue['id'] print(ipmiID) obj = models.Host.objects.filter(roomName_id=ipmiID).first() if obj: dictDelete = [ipmiID, 0] listDelete.append(dictDelete) else: models.Rooms.objects.filter(id=ipmiID).delete() dictDelete = [ipmiID, 1] listDelete.append(dictDelete) data = json.dumps(listDelete).encode() return HttpResponse(data)
[ "root@localhost.localdomain" ]
root@localhost.localdomain
9f6df0b9d667e48f2a477fe0fe0a8f9e65ad8660
4e382ae46cf997ea2dbdfcfa463a57d3e0e9ad97
/sols/alien_dictionary.py
5055b53bd8f24e5405e99010a8f3c21326e00665
[]
no_license
hayeonk/leetcode
5136824838eb17ed2e4b7004301ba5bb1037082f
6485f8f9b5aa198e96fbb800b058d9283a28e4e2
refs/heads/master
2020-04-28T03:37:16.800519
2019-06-01T14:34:45
2019-06-01T14:34:45
174,943,756
0
1
null
null
null
null
UTF-8
Python
false
false
1,456
py
from collections import defaultdict


class Solution(object):
    def alienOrder(self, words):
        def buildGraph(w1, w2):
            i = j = 0
            while i < len(w1) and j < len(w2):
                if w1[i] != w2[j]:
                    graph[w1[i]].append(w2[i])
                    break
                else:
                    graph[w1[i]]
                i += 1
                j += 1
            while i < len(w1):
                graph[w1[i]]
                i += 1
            while j < len(w2):
                graph[w2[j]]
                j += 1

        graph = defaultdict(list)
        last = ""
        for i in xrange(len(words)):
            buildGraph(last, words[i])
            last = words[i]

        def dfs(u, recStack):
            visited.add(u)
            recStack.add(u)
            if u in graph:
                for v in graph[u]:
                    if v not in visited:
                        if not dfs(v, recStack):
                            return False
                    elif v in recStack:
                        return False
            recStack.remove(u)
            ans.append(u)
            return True

        ans = []
        visited = set()
        for c in graph:
            if c not in visited:
                if not dfs(c, set()):
                    return ""
        return "".join(ans[::-1])
[ "31617695+hayeonk@users.noreply.github.com" ]
31617695+hayeonk@users.noreply.github.com
90e2690474c76dfd0c66852f7808dfb0f2d8a6c3
93dd86c8d0eceaee8276a5cafe8c0bfee2a315d3
/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py
f61d1ab888a51b2ebe4d1205b30fb84dfa4e7aeb
[ "Apache-2.0" ]
permissive
hutuxian/Paddle
f8b7693bccc6d56887164c1de0b6f6e91cffaae8
a1b640bc66a5cc9583de503e7406aeba67565e8d
refs/heads/develop
2023-08-29T19:36:45.382455
2020-09-09T09:19:07
2020-09-09T09:19:07
164,977,763
8
27
Apache-2.0
2023-06-16T09:47:39
2019-01-10T02:50:31
Python
UTF-8
Python
false
false
2,996
py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest

import paddle
import paddle.nn as nn
import paddle.fluid as fluid

import numpy as np


class LeNetDygraph(fluid.dygraph.Layer):
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(LeNetDygraph, self).__init__()
        self.num_classes = num_classes
        self.features = nn.Sequential(
            nn.Conv2d(
                1, 6, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Pool2D(2, 'max', 2),
            nn.Conv2d(
                6, 16, 5, stride=1, padding=0),
            nn.ReLU(),
            nn.Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = nn.Sequential(
                nn.Linear(400, 120),
                nn.Linear(120, 84),
                nn.Linear(84, 10),
                nn.Softmax())  #Todo: accept any activation

    def forward(self, inputs):
        x = self.features(inputs)

        if self.num_classes > 0:
            x = fluid.layers.flatten(x, 1)
            x = self.fc(x)
        return x


def init_weights(layer):
    if type(layer) == nn.Linear:
        new_weight = paddle.fill_constant(
            layer.weight.shape, layer.weight.dtype, value=0.9)
        layer.weight.set_value(new_weight)
        new_bias = paddle.fill_constant(
            layer.bias.shape, layer.bias.dtype, value=-0.1)
        layer.bias.set_value(new_bias)
    elif type(layer) == nn.Conv2d:
        new_weight = paddle.fill_constant(
            layer.weight.shape, layer.weight.dtype, value=0.7)
        layer.weight.set_value(new_weight)
        new_bias = paddle.fill_constant(
            layer.bias.shape, layer.bias.dtype, value=-0.2)
        layer.bias.set_value(new_bias)


class TestLayerApply(unittest.TestCase):
    def test_apply_init_weight(self):
        with fluid.dygraph.guard():
            net = LeNetDygraph()

            net.apply(init_weights)

            for layer in net.sublayers():
                if type(layer) == nn.Linear:
                    np.testing.assert_allclose(layer.weight.numpy(), 0.9)
                    np.testing.assert_allclose(layer.bias.numpy(), -0.1)
                elif type(layer) == nn.Conv2d:
                    np.testing.assert_allclose(layer.weight.numpy(), 0.7)
                    np.testing.assert_allclose(layer.bias.numpy(), -0.2)


if __name__ == '__main__':
    unittest.main()
[ "noreply@github.com" ]
hutuxian.noreply@github.com
b3c3968421eca0da3d4b2b7b48389e1ed8c6ac29
c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34
/source/Clarification/DFS_BFS/127.单词接龙.py
3fa9f475399d5187665f9e3f4d03a439ad1aaffc
[ "MIT" ]
permissive
zhangwang0537/LeetCode-Notebook
73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1
1dbd18114ed688ddeaa3ee83181d373dcc1429e5
refs/heads/master
2022-11-13T21:08:20.343562
2020-04-09T03:11:51
2020-04-09T03:11:51
277,572,643
0
0
MIT
2020-07-06T14:59:57
2020-07-06T14:59:56
null
UTF-8
Python
false
false
3,824
py
# Given two words (beginWord and endWord) and a dictionary, find the length of the
# shortest transformation sequence from beginWord to endWord, following these rules:
#
# Only one letter can be changed per transformation.
# Every intermediate word in the transformation must exist in the dictionary.
# Notes:
#
# If no such transformation sequence exists, return 0.
# All words have the same length.
# All words consist of lowercase letters only.
# There are no duplicate words in the dictionary.
# You may assume beginWord and endWord are non-empty and not equal to each other.
# Example 1:
#
# Input:
# beginWord = "hit",
# endWord = "cog",
# wordList = ["hot","dot","dog","lot","log","cog"]
#
# Output: 5
#
# Explanation: One shortest transformation sequence is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
# so its length 5 is returned.
# Example 2:
#
# Input:
# beginWord = "hit"
# endWord = "cog"
# wordList = ["hot","dot","dog","lot","log"]
#
# Output: 0
#
# Explanation: endWord "cog" is not in the dictionary, so no transformation is possible.

from collections import defaultdict


class Solution(object):
    def __init__(self):
        self.length = 0
        # Dictionary to hold combination of words that can be formed,
        # from any given word. By changing one letter at a time.
        self.all_combo_dict = defaultdict(list)

    def visitWordNode(self, queue, visited, others_visited):
        current_word, level = queue.pop(0)
        for i in range(self.length):
            # Intermediate words for current word
            intermediate_word = current_word[:i] + "*" + current_word[i+1:]

            # Next states are all the words which share the same intermediate state.
            for word in self.all_combo_dict[intermediate_word]:
                # If the intermediate state/word has already been visited from the
                # other parallel traversal this means we have found the answer.
                if word in others_visited:
                    return level + others_visited[word]
                if word not in visited:
                    # Save the level as the value of the dictionary, to save number of hops.
                    visited[word] = level + 1
                    queue.append((word, level + 1))
        return None

    def ladderLength(self, beginWord, endWord, wordList):
        """
        :type beginWord: str
        :type endWord: str
        :type wordList: List[str]
        :rtype: int
        """
        if endWord not in wordList or not endWord or not beginWord or not wordList:
            return 0

        # Since all words are of same length.
        self.length = len(beginWord)

        for word in wordList:
            for i in range(self.length):
                # Key is the generic word
                # Value is a list of words which have the same intermediate generic word.
                self.all_combo_dict[word[:i] + "*" + word[i+1:]].append(word)

        # Queues for birdirectional BFS
        queue_begin = [(beginWord, 1)]  # BFS starting from beginWord
        queue_end = [(endWord, 1)]  # BFS starting from endWord

        # Visited to make sure we don't repeat processing same word
        visited_begin = {beginWord: 1}
        visited_end = {endWord: 1}
        ans = None

        # We do a birdirectional search starting one pointer from begin
        # word and one pointer from end word. Hopping one by one.
        while queue_begin and queue_end:

            # One hop from begin word
            ans = self.visitWordNode(queue_begin, visited_begin, visited_end)
            if ans:
                return ans
            # One hop from end word
            ans = self.visitWordNode(queue_end, visited_end, visited_begin)
            if ans:
                return ans

        return 0
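A brief usage sketch of the bidirectional-BFS solution above (hypothetical driver code, reusing Example 1 from the translated problem statement):

# Hypothetical usage, based on Example 1 in the comments above.
solver = Solution()
length = solver.ladderLength("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"])
print(length)  # expected: 5 ("hit" -> "hot" -> "dot" -> "dog" -> "cog")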
[ "mzm@mail.dlut.edu.cn" ]
mzm@mail.dlut.edu.cn
3b64861964aacf042ab29afae8f2a3f49608ae1b
f7d0f201f9e4730e334ccd1c0050831af46110c7
/problem001.py
89efcbea327cf84c98d78a3d2311ab572552843c
[]
no_license
1UnboundedSentience/projecteuler
4f0d0b1a7d289e344543caa7f5695743e122dd53
1fd4184a3de9aea07bffa827404a3fdc07178edf
refs/heads/master
2021-01-15T23:36:04.303115
2012-01-13T00:43:20
2012-01-13T00:43:20
null
0
0
null
null
null
null
UTF-8
Python
false
false
434
py
""" If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23. Find the sum of all the multiples of 3 or 5 below 1000. From http://projecteuler.net/index.php?section=problems&id=1 """ def problem001(max): return sum([i for i in range(max) if i % 3 == 0 or i % 5 == 0]) if __name__ == '__main__': assert problem001(10) == 23 print problem001(1000)
[ "stein.magnus@jodal.no" ]
stein.magnus@jodal.no
057df236c8787cde17bb88efa4a7e8f67e6a7230
e781b0dfd0a193fa229c81dd816f8977529e9c47
/plenum/test/checkpoints/test_checkpoint_stable_while_unstashing.py
6eba14a195d8dbca6d3981a7a6b9bfae4d7f566b
[ "Apache-2.0" ]
permissive
ddntechssi/indy-plenum
b8a2ac597b8249994fa0b9e0aa3bb7965c02a693
16868467e1340a5557f7d610370dce5a59c6097b
refs/heads/master
2020-04-30T04:35:54.054594
2019-03-19T18:41:28
2019-03-19T18:41:28
176,614,246
1
0
Apache-2.0
2019-03-19T23:27:13
2019-03-19T23:27:13
null
UTF-8
Python
false
false
3,072
py
from plenum.test.checkpoints.helper import chkChkpoints, check_stashed_chekpoints
from plenum.test.delayers import ppDelay, msg_rep_delay
from plenum.test.helper import sdk_send_random_and_check, assertExp
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.test.stasher import delay_rules
from stp_core.loop.eventually import eventually

CHK_FREQ = 5
nodeCount = 7  # it's crucial for this test to have f > 1


def test_stabilize_checkpoint_while_unstashing_when_missing_pre_prepare(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_client):
    # Prepare nodes
    lagging_node = txnPoolNodeSet[-1]
    lagging_master_replcia = lagging_node.master_replica
    rest_nodes = txnPoolNodeSet[:-1]

    # 1. send enough requests so that just 1 is left for checkpoint stabilization
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, reqs_for_checkpoint - 1)

    # 2. delay PrePrepare on 1 node so that prepares and commits will be stashed
    with delay_rules(lagging_node.nodeIbStasher, ppDelay()):
        with delay_rules(lagging_node.nodeIbStasher, msg_rep_delay()):
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, 1)

            # all good nodes stabilized checkpoint
            looper.run(eventually(chkChkpoints, rest_nodes, 1, 0))

            # bad node received checkpoints from all nodes but didn't stabilize it
            looper.run(eventually(check_stashed_chekpoints, lagging_node, len(rest_nodes)))
            looper.run(eventually(chkChkpoints, [lagging_node], 1, None))

            # bad node has all commits and prepares for the last request stashed
            looper.run(eventually(
                lambda: assertExp(
                    (0, CHK_FREQ) in lagging_master_replcia.preparesWaitingForPrePrepare and
                    len(lagging_master_replcia.preparesWaitingForPrePrepare[(0, CHK_FREQ)]) ==
                    len(rest_nodes) - 1
                )
            ))
            looper.run(eventually(
                lambda: assertExp(
                    (0, CHK_FREQ) in lagging_master_replcia.commitsWaitingForPrepare and
                    len(lagging_master_replcia.commitsWaitingForPrepare[(0, CHK_FREQ)]) ==
                    len(rest_nodes)
                )
            ))

    # 3. the delayed PrePrepare is processed, and stashed prepares and commits are unstashed
    # checkpoint will be stabilized during unstashing, and the request will be ordered
    looper.run(eventually(chkChkpoints, [lagging_node], 1, 0))
    waitNodeDataEquality(looper, *txnPoolNodeSet, customTimeout=5)
[ "alexander.sherbakov@dsr-corporation.com" ]
alexander.sherbakov@dsr-corporation.com
68fe1cc9388b76c4e397bfc4c36a42288ee36988
d375819f9de5760acc860af433b87ed52cfe64e8
/wyggles/sprite/engine.py
38a1ea5fb4473586c9053657ed5d1c1649fb3d6b
[]
no_license
kfields/wyggles-old
a6bfc568cd470447da2aaae1a6ad7ca4ca901858
cb9f7ea9ef47e4b951c8a498952d904f28030317
refs/heads/master
2022-04-22T21:11:34.153235
2020-03-15T23:08:52
2020-03-15T23:08:52
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,228
py
import math import os #import random from random import random import sys from .layer import Layer from wyggles.mathutils import * from .collision import Collision worldMinX = 0 worldMinY = 0 worldMaxX = 640 worldMaxY = 480 def without( source, element): temp = source[:] try: while temp: temp.remove( element ) except: return temp return temp def materializeRandomFromCenter(sprite): halfMaxX = worldMaxX / 2 halfMaxY = worldMaxX / 2 diameter = 400 radius = diameter / 2 sprite.materializeAt( (halfMaxX - radius) + (random() * diameter), (halfMaxY - radius) + (random() * diameter)) class SpriteEngine(): def __init__(self): self.root = Layer("root") # self.actors = [] self.beacons = [] self.bodies = [] self.collisions = [] self.idCounter = 0 # self.gravityX = 0 #self.gravityY = 9.8 ; self.gravityY = 0 def addActor(self, actor): self.actors.append(actor) def removeActor(self, actor) : #self.actors = self.actors.without(actor) self.actors.remove(actor) def addBeacon(self, beacon) : self.beacons.append(beacon) ; def removeBeacon(self, beacon): #self.beacons = self.beacons.without(beacon) self.beacons.remove(beacon) def addBody(self, body): self.bodies.append(body) def removeBody(self, body): #self.bodies = self.bodies.without(body) self.bodies.remove(body) def addCollision(self, collision): self.collisions.append(collision) def removeCollision(self, collision): #self.collisions = self.collisions.without(collision) self.collisions.remove(collision) def findCollision(self, b1, b2): collision = None for collision in self.collisions: if(collision.b1 == b1 and collision.b2 == b2 or collision.b1 == b2 and collision.b2 == b1): return collision ; return None ; def step(self, dt): dt = .1 ; inv_dt = 0 if(dt > 0.0): inv_dt = 1.0 / dt b = None # self.broadphase() ; # for b in self.bodies: if(b.invMass == 0.0): continue b.velX += dt * (self.gravityX + b.invMass * b.forceX) b.velY += dt * (self.gravityY + b.invMass * b.forceY) b.angularVel += dt * b.invI * b.torque # ... insert penetration constraints here ... for collision in self.collisions: if(not collision.touched): continue collision.preStep(inv_dt) # iterations = 1 i = 0; while(i < iterations): i = i + 1 for collision in self.collisions: if(not collision.touched): continue collision.applyImpulse() # for collision in self.collisions: if(not collision.touched): continue collision.postStep() # for b in self.bodies: if(b.invMass == 0.0): continue b.setPos(b.x + dt * (b.velX + b.biasedVelX), b.y + dt * (b.velY + b.biasedVelY)) ; b.rotation += dt * (b.angularVel + b.biasedAngularVel); #Bias velocities are reset to zero each step. 
b.biasedVelX = 0 b.biasedVelY = 0 b.biasedAngularVel = 0 b.forceX = 0 b.forceY = 0 b.torque = 0 # b.step() # for actor in self.actors: actor.step() # #self.renderer.render() self.render() def broadphase(self): b1 = None b2 = None for b1 in self.bodies: for b2 in self.bodies: if(b1 == b2): continue if (b1.invMass == 0.0 and b2.invMass == 0.0): continue if(not b1.intersects(b2)): continue collision = self.findCollision(b1, b2) if(collision == None): collision = Collision(b1,b2) self.addCollision(collision) collision.collide() ; def query(self, x, y, distance): beacon = None result = None for beacon in self.beacons: dist = distance2d(x, y, beacon.x, beacon.y) if(dist < distance): if(result == None): result = [beacon] else: result.append(beacon) return result def genId(self, name): return name + str(self.idCounter) self.idCounter += 1 # def render(self): self.root.render() def get_root(self): return self.root #fixme spriteEngine = SpriteEngine()
[ "kurtisfields@gmail.com" ]
kurtisfields@gmail.com
8e747d7a1899319d6f0f55134ae42cec7a6a1c63
017d82f3e3040fbce485a0135c062061648f91f0
/013/013.py
c4839965ceb730023dc41e005ff02f424be4d096
[]
no_license
bashwork/project-euler
404b7e2bdd99888cdb2dfae6b2272ed3730a5aa0
84cc18968a618a17584c4455f94e2e57f9def2cb
refs/heads/master
2016-09-05T21:17:04.754346
2015-02-06T19:49:03
2015-02-06T19:49:03
379,188
3
0
null
null
null
null
UTF-8
Python
false
false
5,641
py
vector = [ 37107287533902102798797998220837590246510135740250, 46376937677490009712648124896970078050417018260538, 74324986199524741059474233309513058123726617309629, 91942213363574161572522430563301811072406154908250, 23067588207539346171171980310421047513778063246676, 89261670696623633820136378418383684178734361726757, 28112879812849979408065481931592621691275889832738, 44274228917432520321923589422876796487670272189318, 47451445736001306439091167216856844588711603153276, 70386486105843025439939619828917593665686757934951, 62176457141856560629502157223196586755079324193331, 64906352462741904929101432445813822663347944758178, 92575867718337217661963751590579239728245598838407, 58203565325359399008402633568948830189458628227828, 80181199384826282014278194139940567587151170094390, 35398664372827112653829987240784473053190104293586, 86515506006295864861532075273371959191420517255829, 71693888707715466499115593487603532921714970056938, 54370070576826684624621495650076471787294438377604, 53282654108756828443191190634694037855217779295145, 36123272525000296071075082563815656710885258350721, 45876576172410976447339110607218265236877223636045, 17423706905851860660448207621209813287860733969412, 81142660418086830619328460811191061556940512689692, 51934325451728388641918047049293215058642563049483, 62467221648435076201727918039944693004732956340691, 15732444386908125794514089057706229429197107928209, 55037687525678773091862540744969844508330393682126, 18336384825330154686196124348767681297534375946515, 80386287592878490201521685554828717201219257766954, 78182833757993103614740356856449095527097864797581, 16726320100436897842553539920931837441497806860984, 48403098129077791799088218795327364475675590848030, 87086987551392711854517078544161852424320693150332, 59959406895756536782107074926966537676326235447210, 69793950679652694742597709739166693763042633987085, 41052684708299085211399427365734116182760315001271, 65378607361501080857009149939512557028198746004375, 35829035317434717326932123578154982629742552737307, 94953759765105305946966067683156574377167401875275, 88902802571733229619176668713819931811048770190271, 25267680276078003013678680992525463401061632866526, 36270218540497705585629946580636237993140746255962, 24074486908231174977792365466257246923322810917141, 91430288197103288597806669760892938638285025333403, 34413065578016127815921815005561868836468420090470, 23053081172816430487623791969842487255036638784583, 11487696932154902810424020138335124462181441773470, 63783299490636259666498587618221225225512486764533, 67720186971698544312419572409913959008952310058822, 95548255300263520781532296796249481641953868218774, 76085327132285723110424803456124867697064507995236, 37774242535411291684276865538926205024910326572967, 23701913275725675285653248258265463092207058596522, 29798860272258331913126375147341994889534765745501, 18495701454879288984856827726077713721403798879715, 38298203783031473527721580348144513491373226651381, 34829543829199918180278916522431027392251122869539, 40957953066405232632538044100059654939159879593635, 29746152185502371307642255121183693803580388584903, 41698116222072977186158236678424689157993532961922, 62467957194401269043877107275048102390895523597457, 23189706772547915061505504953922979530901129967519, 86188088225875314529584099251203829009407770775672, 11306739708304724483816533873502340845647058077308, 82959174767140363198008187129011875491310547126581, 97623331044818386269515456334926366572897563400500, 42846280183517070527831839425882145521227251250327, 
55121603546981200581762165212827652751691296897789, 32238195734329339946437501907836945765883352399886, 75506164965184775180738168837861091527357929701337, 62177842752192623401942399639168044983993173312731, 32924185707147349566916674687634660915035914677504, 99518671430235219628894890102423325116913619626622, 73267460800591547471830798392868535206946944540724, 76841822524674417161514036427982273348055556214818, 97142617910342598647204516893989422179826088076852, 87783646182799346313767754307809363333018982642090, 10848802521674670883215120185883543223812876952786, 71329612474782464538636993009049310363619763878039, 62184073572399794223406235393808339651327408011116, 66627891981488087797941876876144230030984490851411, 60661826293682836764744779239180335110989069790714, 85786944089552990653640447425576083659976645795096, 66024396409905389607120198219976047599490197230297, 64913982680032973156037120041377903785566085089252, 16730939319872750275468906903707539413042652315011, 94809377245048795150954100921645863754710598436791, 78639167021187492431995700641917969777599028300699, 15368713711936614952811305876380278410754449733078, 40789923115535562561142322423255033685442488917353, 44889911501440648020369068063960672322193204149535, 41503128880339536053299340368006977710650566631954, 81234880673210146739058568557934581403627822703280, 82616570773948327592232845941706525094512325230608, 22918802058777319719839450180888072429661980811197, 77158542502016545090413245809786882778948721859617, 72107838435069186155435662884062257473692284509516, 20849603980134001723930671666823555245252804609722, 53503534226472524250874054075591789781264330331690, ] print str(sum(vector))[:10]
[ "bashwork@gmail.com" ]
bashwork@gmail.com
244bb265da9da87390906151f61c5aa088940dec
fd65851c7977176cfa69056ea5d63ca529e74271
/components/google-cloud/google_cloud_pipeline_components/container/experimental/gcp_launcher/utils/json_util.py
cdc9e256749386fb21acd07efaa92ce44cf45fc0
[ "Apache-2.0", "BSD-3-Clause", "MIT", "BSD-2-Clause" ]
permissive
NikeNano/pipelines
dad9f45267a7f4c495a30880dd6fe1570f26fa64
73804f8928ce671839d34800627b6d3ea9f820a7
refs/heads/master
2022-01-29T21:24:43.693120
2021-11-20T18:18:35
2021-11-20T18:18:35
221,051,451
1
1
Apache-2.0
2021-04-23T20:07:11
2019-11-11T19:11:29
Python
UTF-8
Python
false
false
1,565
py
# Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json


# TODO(IronPan) This library can be removed once ifPresent is supported within concat[] in component YAML V2.
# Currently the component YAML will generate the payload with all API fields presented,
# and those fields will be left empty if user doesn't specify them in the Python.
def __remove_empty(j):
    """Remove the empty fields in the Json."""
    if isinstance(j, list):
        return list(filter(None, [__remove_empty(i) for i in j]))
    if isinstance(j, dict):
        final_dict = {}
        for k, v in j.items():
            if v:
                final_dict[k] = __remove_empty(v)
        return final_dict
    return j


def recursive_remove_empty(j):
    """Recursively remove the empty fields in the Json until there is no empty fields and sub-fields."""
    needs_update = True
    while needs_update:
        new_j = __remove_empty(j)
        needs_update = json.dumps(new_j) != json.dumps(j)
        j = new_j
    return j
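A minimal usage sketch for the helper above; the nested payload is invented for illustration. Repeated passes matter because removing an empty leaf can leave its parent empty:

payload = {
    "display_name": "training-job",
    "labels": {},
    "encryption_spec": {"kms_key_name": ""},
    "args": [],
}

cleaned = recursive_remove_empty(payload)
print(cleaned)  # expected: {'display_name': 'training-job'}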
[ "noreply@github.com" ]
NikeNano.noreply@github.com
12357a3852565d74b4832a5ccb00cc3298eb2a2f
fac68cda1a9e79d8f040ca632f0353ccb8d20c8c
/backtesting using zipline/zip2.py
5269a754a5da563c9b213a17606482a38b07ac13
[]
no_license
fagan2888/Algo-trading-strategy
11a9b5f70f53492d5b407ac8593af6921a6d44c1
4450f5a28f069e0e695843b0f69197519fa5c1da
refs/heads/master
2022-01-11T19:07:05.983331
2019-05-28T05:15:11
2019-05-28T05:15:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,671
py
import pytz from datetime import datetime from zipline.api import order, symbol, record, order_target from zipline.algorithm import TradingAlgorithm from zipline.data.loader import load_bars_from_yahoo import pyexcel # Load data manually from Yahoo! finance start = datetime(2011, 1, 1, 0, 0, 0, 0, pytz.utc).date() end = datetime(2012,1,1,0,0,0,0, pytz.utc).date() data = load_bars_from_yahoo(stocks=['SPY'], start=start,end=end) #code def initialize(context): context.security = symbol('SPY') #code def handle_data(context, data): MA1 = data[context.security].mavg(50) MA2 = data[context.security].mavg(100) date = str(data[context.security].datetime)[:10] current_price = data[context.security].price current_positions = context.portfolio.positions[symbol('SPY')].amount cash = context.portfolio.cash value = context.portfolio.portfolio_value current_pnl = context.portfolio.pnl if (MA1 > MA2) and current_positions == 0: number_of_shares = int(cash/current_price) order(context.security, number_of_shares) record(date=date,MA1 = MA1, MA2 = MA2, Price= current_price,status="buy",shares=number_of_shares,PnL=current_pnl,cash=cash,value=value) elif (MA1 < MA2) and current_positions != 0: order_target(context.security, 0) record(date=date,MA1 = MA1, MA2 = MA2, Price= current_price,status="sell",shares="--",PnL=current_pnl,cash=cash,value=value) else: record(date=date,MA1 = MA1, MA2 = MA2, Price= current_price,status="--",shares="--",PnL=current_pnl,cash=cash,value=value)
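The core of the algorithm above is a 50/100-bar moving-average crossover: go long when the fast average rises above the slow one, flatten when it drops below. A small library-free sketch of just that signal rule (the helper names and window defaults are illustrative, not part of zipline's API):

def moving_average(prices, window):
    """Simple trailing moving average over the last `window` prices."""
    return sum(prices[-window:]) / float(window)


def crossover_signal(prices, position=0, fast=50, slow=100):
    """Return 'buy', 'sell' or 'hold', mirroring the rules in handle_data."""
    if len(prices) < slow:
        return 'hold'
    ma_fast = moving_average(prices, fast)
    ma_slow = moving_average(prices, slow)
    if ma_fast > ma_slow and position == 0:
        return 'buy'
    if ma_fast < ma_slow and position != 0:
        return 'sell'
    return 'hold'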
[ "noreply@github.com" ]
fagan2888.noreply@github.com
3b31b8b5fde1d17b8fc786ed38466bf518c1d6ff
2251d71bc3ecb589ce1a8b274a08370c3240bf51
/0238 Product of Array Except Self.py
b83c04107ed1d7745bf70b11871df718760b6139
[]
no_license
YuanyuanQiu/LeetCode
3495a3878edc2028f134bddb5b9ec963069562cb
6f5d0ef6a353713c0b41fa7ec0fb8c43a7e8dc55
refs/heads/master
2022-12-11T04:04:01.686226
2022-12-06T18:42:14
2022-12-06T18:42:14
231,168,173
0
0
null
null
null
null
UTF-8
Python
false
false
1,251
py
#def productExceptSelf(self, nums: List[int]) -> List[int]:
#    n = len(nums)
#    zeros = nums.count(0)
#
#    if zeros > 1:
#        return [0] * n
#
#    product = 1
#    for num in nums:
#        if num != 0:
#            product *= num
#
#    if zeros == 1:
#        res = [0] * n
#        idx = nums.index(0)
#        res[idx] = product
#        return res
#
#    res = []
#    for i in range(n):
#        res.append(int(product/nums[i]))
#
#    return res


def productExceptSelf(self, nums: List[int]) -> List[int]:
    length = len(nums)
    answer = [0] * length

    # answer[i] holds the product of all elements to the left of index i.
    # There is nothing to the left of index 0, so answer[0] = 1.
    answer[0] = 1
    for i in range(1, length):
        answer[i] = nums[i - 1] * answer[i - 1]

    # R is the running product of all elements to the right.
    # At the start there is nothing to the right, so R = 1.
    R = 1
    for i in reversed(range(length)):
        # For index i, the left product is answer[i] and the right product is R.
        answer[i] = answer[i] * R
        # R must accumulate everything to the right, so multiply the current value into R.
        R *= nums[i]

    return answer
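A short worked example of the two passes for nums = [1, 2, 3, 4], traced by hand:

# Left-to-right pass: answer[i] holds the product of everything left of i.
#   answer = [1, 1, 2, 6]
# Right-to-left pass multiplies in the running right product R:
#   i=3: R=1  -> answer[3] = 6*1  = 6,  R becomes 4
#   i=2: R=4  -> answer[2] = 2*4  = 8,  R becomes 12
#   i=1: R=12 -> answer[1] = 1*12 = 12, R becomes 24
#   i=0: R=24 -> answer[0] = 1*24 = 24
# Final answer: [24, 12, 8, 6]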
[ "50243732+YuanyuanQiu@users.noreply.github.com" ]
50243732+YuanyuanQiu@users.noreply.github.com
a2fdbe7e8a4cdb9897e6296c6966bbe1bde3e053
8fd695abd0b8b2523e42786d6b90fa99058545c5
/horsempdc/art.py
8f87fadafbf7aee0db1a70e2bd9a45a6e8ca83ec
[]
no_license
jbremer/horsempdc
109eb7ad9fd04124707fbb2955152996d845f3bb
4ec1fdc6926c3f83face5bcbee16c53a761fc6bf
refs/heads/master
2016-09-10T02:10:23.101176
2015-12-17T14:39:32
2015-12-17T14:39:32
23,502,118
0
1
null
2015-12-16T13:57:33
2014-08-30T22:45:35
Python
UTF-8
Python
false
false
4,491
py
# Copyright (C) 2014 Jurriaan Bremer. # This file is part of HorseMPDC - http://www.horsempdc.org/. # See the file 'docs/LICENSE.txt' for copying permission. # Thanks to http://www.asciiworld.com/-Horses-.html _doge_horse = r""" |\ /| ___| \,,/_/ ---__/ \/ \ __--/ (D) \ _ -/ (_ \ // / \_ / ==\ __-------_____--___--/ / \_ O o) / / \==/` / / || ) \_/\ || / _ / | | | /--______ ___\ /\ : | / __- - _/ ------ | | \ \ | - - / | | \ ) | | - | | ) | | | | | | | | | | | | < | | | |_/ < | /__\ < \ /__\ /___\ """ # Thanks to Neil Smith, http://www.ascii-art.de/ascii/ghi/horse.txt _dumb_horse = r""" ./|,,/| < o o) <\ ( | <\\ |\ | <\\\ |(__) <\\\\ | """ # Thanks to http://www.asciiworld.com/-Horses-.html _angry_horse = r""" ,, ,,, ,,,, ,,,,,,,, /\ /;; ;;;;;;;;;;;;;; ;;;/ ,;`. ,,,, ; `-. /// //////// ///// // ,','`;. ///;;;;,. ,' ,,`-.;;;;;; ;;;;;;; ;;;;// ,' ,' `.`. ///;;//;, ,' ;;;//////// ////// ///////,' ,' ; : ;;// ;//, `. ;`;;;;;;;: ;;;;:;; ;:;:;;:;: ,' ,' : ;;;;;;;;/, `. `; :!::::!;;;;;!::::!;!;;!;: `. ,' ,'///!!;;;;;; `._!!;!!!!;!!!!!;!!!!;!;!!;!!`. `;' ,'-.!!!//;;;//// ; . . , ,' ::-!_///;;;; .' ,%' ,%' `%. `%.;; `%. ;; ,:: `! //// .', ' ' `%, `:. `::. :: :; %:: `! ;; ,';; `%, `;;. `::. `.;;; `:% %:/// ,';;' ; ;; `::; `%, ;%:. :: :: %`!/ ,' ;.' .%. ;; `;; ;; ' `; % :: % : : `;; %%% `:: ;; ;;; ` ` :: % ` ; ' .%%' `% ; ' ,., `;; `%, ::' %::% ;`. `. %%%% ;; .___;;;; ' `: `; :: ::: : : ; %%%% ;: ,:' _ `.`. ;;; ;; `:: :::. `.; ; `%%' ;;' :: (0) ; : ::' ; :: `::: ,' ;' %%' ;;' ;;.___,',; ;; ;; ; ,::: , ;' :%: ;; ,'------'' ;;;' .;; :::' ,' ;; ;%; ;; ' ::' ,;;; ::: : :' :%: `; ;;;;' ;; ::% : ;; :%' ;; ;...,,;;'' ;;' ; ; ::: ; `; :: ;;' ,:::' . .;; ,' ;; `;; ; ;' :: .;;' ,:::' ,::%. ;;; ,' ;; ,;; : ;;. .:' ;;' ,:::' ;;:::' ;; ;;' ,' ;;; ;;;' :`;; :: ;; ;;;' ' . ;; ' _,-' ;;; `;' : ;' .:' ;; .::: ,%'`; ;;; _,-' .;;;' ;' ,' ;; ;; ;;' :::' ,, .; ;; _,' ; ,;;;' ,;;' .'~~~~~~~~~._ ,;' ,',' ;; ',-' ,' ,';; ;;;' ;;; ,' `-.,' .' ;; ,' ,' ;;;;;;' ,;; ;;; .'; . `., ;; ,' ; ,;;% ;;; : .. _.'; ; '_,' .' ,,,,,,,%;;' `;;; `. . (_.' . ;' ,-' : ,,,,,;;;;;;;;;' .;;; `-._ ___,' ,' :..\"\"\"\"\"`````' ,;;;; `------'____.' : ..;;;; `---' `. ..;;;;' :......:::::::::;;;;' :::::::::::::::;' ,;;; ; ;;;;' ; .;;;; ,'...:::::. ;;;' .' `;;;;;;'' ; `---------------------------- """ def load_ascii_art(name): inventory = { 'doge-horse': _doge_horse, 'dumb-horse': _dumb_horse, 'angry-horse': _angry_horse, } lines = inventory[name].split('\n') if not lines[0]: lines = lines[1:] if not lines[-1]: lines = lines[:-1] rows = len(lines) columns = max(len(line) for line in lines) return rows, columns, lines
[ "jurriaanbremer@gmail.com" ]
jurriaanbremer@gmail.com
dc2c2119263a0157cb7a145f69ea778a8e49e51b
ec84619271eac42481231218c9ee653dec99adad
/7. Set- Dictionary- Divide - Conquer/469. Same Tree.py
1cdd5a38d7f4a9d56672c1707ca8e0adaf4f8772
[]
no_license
LingHsiLiu/Algorithm0
19a968fffb5466022f9856c36af0364da6472434
f438e828dc9dd6196ee5809eb8fac21ccb688bf2
refs/heads/master
2020-04-04T17:55:48.182172
2019-01-02T19:06:57
2019-01-02T19:06:57
156,142,530
0
0
null
null
null
null
UTF-8
Python
false
false
811
py
# 469. Same Tree
# Check if two binary trees are identical. Identical means the two binary trees
# have the same structure and every identical position has the same value.

# Example
#     1             1
#    / \           / \
#   2   2   and   2   2
#  /             /
# 4             4
# are identical.

#     1             1
#    / \           / \
#   2   3   and   2   3
#  /               \
# 4                 4
# are not identical.

"""
Definition of TreeNode:
class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None
"""


class Solution:
    """
    @param a: the root of binary tree a.
    @param b: the root of binary tree b.
    @return: true if they are identical, or false.
    """
    def isIdentical(self, a, b):
        # Two empty trees are identical; one empty and one non-empty are not.
        if a is None and b is None:
            return True
        if a is None or b is None:
            return False
        # The roots must carry the same value and both subtrees must match recursively.
        return (a.val == b.val and
                self.isIdentical(a.left, b.left) and
                self.isIdentical(a.right, b.right))
[ "noreply@github.com" ]
LingHsiLiu.noreply@github.com
a3e5cbc9a8bec44766a705ec15e5e27f2d0c37de
9a0e2312236b628007a67c07164ea7b97207e47c
/col/apps/syslog_collector/tests/acceptance_tests/test_syslog_collector.py
330a9acb484bb4b76dbd0bf8fc1ddfe348b58f57
[]
no_license
laxmi518/network_project
d88b9fe73522deaa90c1dbfd22c6861020a6c7be
2e998338f3d1142a8098d3dfd35f4c8ad0e4ba00
refs/heads/master
2020-05-21T15:48:07.830107
2018-05-09T18:58:37
2018-05-09T18:58:37
84,631,818
0
0
null
null
null
null
UTF-8
Python
false
false
3,195
py
import os import time import unittest import socket import ssl from subprocess import Popen import re import gevent from pylib.wiring import gevent_zmq as zmq from nose.tools import eq_ from pylib import wiring, disk, conf, inet class test_syslog_collector(unittest.TestCase): os.environ["TZ"] = "UTC" zmq_context = zmq.Context() def setUp(self): # syslog collector forwards the received msg to normalizer_in # starting syslog collector config_path = disk.get_sibling(__file__, 'test-config.json') config = conf.load(config_path) self.port = config['port'] self.ssl_port = config['ssl_port'] self.normalizer = wiring.Wire('norm_front_in', zmq_context=self.zmq_context) self.syslog_collector = Popen(['python', 'syslog_collector.py', config_path]) # Allow to prepare for serving time.sleep(0.5) def tearDown(self): self.syslog_collector.kill() self.normalizer.close() time.sleep(0.5) def send_message(self, address=None, message=None, flow='udp'): address = address or ('127.0.0.1', self.port) message = message or "<124> May 06 2012 15:02:24 [emerg] (17)File exists: Couldn't create accept lock (/private/var/log/apache2/accept.lock.19) (5)\n" host, port = address if flow == 'tcp': client, sockaddr = inet.create_address(host, port) client.connect(sockaddr) client.send(message) elif flow == 'ssl': client, sockaddr = inet.create_address(host, port) client = ssl.wrap_socket(client) client.connect(sockaddr) client.send(message) elif flow == 'udp': client, sockaddr = inet.create_address(host, port, socket.SOCK_DGRAM) client.sendto(message, sockaddr) else: raise ValueError('Unknown flow type: %r' % flow) event = gevent.with_timeout(5, self.normalizer.recv, timeout_value=None) mid = event.pop('mid') assert re.match(r'^LogInspect500\|syslog\|(127.0.0.1|::1)\|\d+\|1$', mid) eq_(event, dict( msg=message.rstrip('\n'), severity=4, facility=15, log_ts=1336316544, device_ip=address[0], device_name='localhost', collected_at='LogInspect500', _type_num='log_ts severity facility', _type_str='msg device_name collected_at', _type_ip='device_ip', )) def test_tcp_basic_flow(self): self.send_message(flow='tcp') def test_ssl_flow(self): self.send_message(('127.0.0.1', self.ssl_port), flow='ssl') def test_udp_basic_flow(self): self.send_message(flow='udp') def test_tcp6_flow(self): self.send_message(('::1', self.port), flow='tcp') def test_ssl6_flow(self): self.send_message(('::1', self.ssl_port), flow='ssl') def test_udp6_flow(self): self.send_message(('::1', self.port), flow='udp') if __name__ == '__main__': import nose nose.run(defaultTest=__name__)
[ "laxmi.jhapa@gmail.com" ]
laxmi.jhapa@gmail.com
f58ebecf367af70681cae87983a4b286dcad25da
9cfaffd2e3fe06467d0e4f7e671e459b04d123ea
/extras/management/commands/updates.py
34dffa7918245bab780b244d8bb583ed6bc223f4
[]
no_license
montenegrop/djangotravelportal
80b72b9e3da517885b6d596fad34049545a598a5
8a15fc387d20b12d16c171c2d8928a9b9d4ba5e1
refs/heads/main
2023-01-29T22:12:58.633181
2020-12-05T15:44:39
2020-12-05T15:44:39
318,826,064
0
0
null
null
null
null
UTF-8
Python
false
false
4,405
py
from django.core.management.base import BaseCommand, CommandError from django.conf import settings from operators.models import QuoteRequest, TourOperator, Itinerary, ItineraryType from users.models import UserProfile import MySQLdb from django.db.models import Count from django.contrib.auth.models import User from places.models import Park, CountryIndex from photos.models import Photo from blog.models import Article from reviews.models import ParkReview, KilimanjaroParkReview, TourOperatorReview from analytics.models import Analytic class Command(BaseCommand): help = '' def handle(self, *args, **options): # update tour operators tour_operators = TourOperator.objects.all() #tour_operators = tour_operators.filter(slug='africaventure') for tour_operator in tour_operators: tour_operator.update_reviews_count() tour_operator.update_average_rating() tour_operator.update_parks_count() tour_operator.update_packages_count() tour_operator.update_quote_request_count() tour_operator.update_photos_count() tour_operator.update_yas_score() tour_operator.update_vehicle_rating() tour_operator.update_meet_and_greet_rating() tour_operator.update_responsiveness() tour_operator.update_safari_quality() tour_operator.update_itinerary_quality() tour_operator.update_packages_count() for country in tour_operator.country_indexes.all(): tour_operator.update_yas_score(country) print('Updated', tour_operators.count(), 'tour_operators') #activity_level itineraries = Itinerary.objects.filter(date_deleted=None) for itinerary in itineraries: itinerary.activity_level = itinerary.calc_max_activity_level() itinerary.activity_level_name = itinerary.calc_activity_level_string() itinerary.save() print('Updated', itineraries.count(), 'itineraries') # update country countries = CountryIndex.objects.all() for country in countries: country.update_packages_count() country.update_photos_count() country.update_parks_count() country.update_operators_count() print('Updated', countries.count(), 'countries') # update articles articles = Article.objects.all() for article in articles: article.update_kudu_count() article.update_visit_count() article.update_comments_count() print('Updated', articles.count(), 'articles') #parks parks = Park.objects.all() for park in parks: park.update_reviews_count() park.update_tour_operators_count() park.update_average_rating() park.update_packages_count() park.update_photos_count() print('Updated', parks.count(), 'parks') # update park reviews reviews = ParkReview.objects.all() for review in reviews: review.update_views_count() review.update_kudu_count() print('Updated', reviews.count(), 'park reviews') # update tour operator reviews reviews = TourOperatorReview.objects.all() for review in reviews: review.update_views_count() review.update_kudu_count() print('Updated', reviews.count(), 'tour op reviews') # update kilimanjaro reviews reviews = KilimanjaroParkReview.objects.all() for review in reviews: review.update_views_count() review.update_kudu_count() print('Updated', reviews.count(), 'kilimanjaro park reviews visit counts') objs = Itinerary.objects.all() for obj in objs: obj.update_visit_count() print('Updated', objs.count(), 'itinerary views') objs = UserProfile.objects.all() for obj in objs: obj.update_review_count() obj.update_kudus_count() print('Updated', objs.count(), 'users reviews and kudus') objs = Photo.objects.filter(date_deleted__isnull=False) for obj in objs: obj.update_kudu_count() print('Updated', objs.count(), 'photos') self.stdout.write(self.style.SUCCESS("DONE"))
[ "juan.crescente@gmail.com" ]
juan.crescente@gmail.com
f0e094eec95b0e2ba7dc77239adfc658f8b0f713
060e99a3935b08f3344f01d3af9a1bf322783b99
/OOP/encapsulation.py
0e99667c6a2accaffee794f206cc93fbe9c61a7b
[]
no_license
Lemmah/pyWorkSpace
a2119a6cd2d2695eeb18a1d41400b7fe97a41c70
ba176a9029f108c39d53970ff5127be7007555ee
refs/heads/master
2021-01-22T11:10:57.205835
2017-09-05T07:57:28
2017-09-05T07:57:28
92,673,469
0
0
null
null
null
null
UTF-8
Python
false
false
670
py
# Encapsulation: data hiding. Encapsulated variables cannot be accessed directly.

class BankAccount:
    ''' This is a bank account class '''

    def __init__(self, accountName="Current Account", balance=200):
        ''' Constructor with encapsulated attributes '''
        self.__accountName = accountName
        self.__balance = balance

    def getBalance(self):
        return self.__balance


accountObject = BankAccount()
''' If you did this, you will encounter errors...
print(accountObject.__accountName)
print(accountObject.__balance)
'''

# Now, how do we get along? Use getters and setters
accountObject = BankAccount()
print(accountObject.getBalance())
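A short sketch of what happens under the hood: double-underscore attributes are name-mangled rather than truly private, which is why direct access fails while the mangled name still resolves. Shown only for illustration; the getter remains the intended way in:

account = BankAccount()

try:
    print(account.__balance)              # AttributeError: the name was mangled
except AttributeError as error:
    print('direct access failed:', error)

print(account.getBalance())               # 200, via the public getter
print(account._BankAccount__balance)      # 200, the mangled name (not recommended)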
[ "lemayiannakolah@gmail.com" ]
lemayiannakolah@gmail.com
bf9f50f49f5bbb9df18f6cfac06a5ea8d787c98f
143eb3ced0ff1f9cad745c620fcb572f72d66048
/Assignment4/atom3/Kernel/Qoca/runUnitTests.py
2c224b32ce916eeab7312760d919486bacf2576d
[]
no_license
pombreda/comp304
2c283c60ffd7810a1d50b69cab1d5c338563376d
d900f58f0ddc1891831b298d9b37fbe98193719d
refs/heads/master
2020-12-11T07:26:19.594752
2014-11-07T12:29:28
2014-11-07T12:29:28
35,264,549
1
1
null
2015-05-08T07:18:18
2015-05-08T07:18:18
null
UTF-8
Python
false
false
714
py
def runUnitTests():
    import unittest
    suite = unittest.TestSuite()

    print 'NOTE: if import fails, try running it from a higher-level directory'
    print 'IE: ..\\atom3\\Kernel> python Qoca\\runUnitTests.py\n'

    from unittests.QocaBasicConstraints import QocaBasicConstraints
    suite.addTest(unittest.makeSuite(QocaBasicConstraints))

    from unittests.pipeTest import PipeTest
    suite.addTest(unittest.makeSuite(PipeTest))

    from unittests.QocaWrapperTest import QocaWrapperTest
    suite.addTest(unittest.makeSuite(QocaWrapperTest))

    unittest.TextTestRunner(verbosity=2).run(suite)


if __name__ == '__main__':
    runUnitTests()
[ "shankland@bigvikinggames.com" ]
shankland@bigvikinggames.com
d588825332a0ef69aeb97056aeff210c8bf6353d
ae7d5d11351af9201ce6181c48b8c60363c7ed00
/lib/galaxy/workflow/reports/generators/__init__.py
0fa46d4fb295ed41b79d33adcfbf98752944b597
[ "CC-BY-2.5", "AFL-2.1", "AFL-3.0", "CC-BY-3.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
natefoo/galaxy
818037d03f39ccfb3714c7e784fd64d7ad8f4d2e
64150c5bd803e75ed032e9f15acd003bae92b5ef
refs/heads/master
2023-08-17T02:57:02.580487
2020-03-26T13:33:01
2020-03-26T13:33:01
31,212,836
2
1
NOASSERTION
2019-04-25T12:30:28
2015-02-23T15:01:46
Python
UTF-8
Python
false
false
2,374
py
"""Module containing Galaxy workflow report generator plugins. """ from abc import ( ABCMeta, abstractmethod ) import six from galaxy.managers.markdown_util import ( internal_galaxy_markdown_to_pdf, ready_galaxy_markdown_for_export, resolve_invocation_markdown, ) @six.add_metaclass(ABCMeta) class WorkflowReportGeneratorPlugin(object): """ """ @property @abstractmethod def plugin_type(self): """Short string labelling this plugin.""" @abstractmethod def generate_report_json(self, trans, invocation, runtime_report_config_json=None): """ """ @abstractmethod def generate_report_pdf(self, trans, invocation, runtime_report_config_json=None): """ """ @six.add_metaclass(ABCMeta) class WorkflowMarkdownGeneratorPlugin(WorkflowReportGeneratorPlugin): """WorkflowReportGeneratorPlugin that generates markdown as base report.""" def generate_report_json(self, trans, invocation, runtime_report_config_json=None): """ """ internal_markdown = self._generate_internal_markdown(trans, invocation, runtime_report_config_json=runtime_report_config_json) export_markdown, extra_rendering_data = ready_galaxy_markdown_for_export(trans, internal_markdown) rval = { "render_format": "markdown", # Presumably the frontend could render things other ways. "markdown": export_markdown, "invocation_markdown": export_markdown, } rval.update(extra_rendering_data) return rval def generate_report_pdf(self, trans, invocation, runtime_report_config_json=None): internal_markdown = self._generate_internal_markdown(trans, invocation, runtime_report_config_json=runtime_report_config_json) return internal_galaxy_markdown_to_pdf(trans, internal_markdown, 'invocation_report') @abstractmethod def _generate_report_markdown(self, trans, invocation, runtime_report_config_json=None): """ """ def _generate_internal_markdown(self, trans, invocation, runtime_report_config_json=None): workflow_markdown = self._generate_report_markdown(trans, invocation, runtime_report_config_json=runtime_report_config_json) internal_markdown = resolve_invocation_markdown(trans, invocation, workflow_markdown) return internal_markdown
[ "jmchilton@gmail.com" ]
jmchilton@gmail.com
39336a1943b085ae0cdf21000d0b5ee2771f5e12
8c036299de04b1dd8edeabdd7b265beb4c16f64d
/WebMirror/management/rss_parser_funcs/feed_parse_extractGooseberrytlWordpressCom.py
69935050ff6657b86bdd6192cf29a7637182ef62
[ "BSD-3-Clause" ]
permissive
collegroup/ReadableWebProxy
f2dcc4ce4f32c461388f40890a2997d61b49b28a
bec24610dd52fde5311dfc9b9cb2b388e23727ec
refs/heads/master
2023-01-11T20:27:38.598545
2020-11-16T06:03:57
2020-11-16T06:03:57
null
0
0
null
null
null
null
UTF-8
Python
false
false
642
py
def extractGooseberrytlWordpressCom(item):
    '''
    Parser for 'gooseberrytl.wordpress.com'
    '''

    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    tagmap = [
        ('tsats',      'The Star Around The Sun',  'translated'),
        ('PRC',        'PRC',                      'translated'),
        ('Loiterous',  'Loiterous',                'oel'),
    ]

    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)

    return False
[ "something@fake-url.com" ]
something@fake-url.com
7f419201fc23a0742c1ddb00244df3d888d47e0e
7eaeb56a2ed19a30559dac8673a979fc64d76e8a
/tests/parsers/c_parser/stmts/if_stmt_tests.py
9ac34d7e32ec9b86aab0bdf3fcd755d5c1d2639d
[ "MIT" ]
permissive
avast/retdec-regression-tests-framework
95935b6a66bee66f58a9f2ea1296f747536aeaae
f8f43c0870df638d114f685a30f8abf8b51d6d1e
refs/heads/master
2023-05-30T18:52:37.332065
2022-12-05T14:37:40
2022-12-05T14:37:40
113,967,405
8
5
MIT
2020-04-07T12:28:40
2017-12-12T09:01:52
Python
UTF-8
Python
false
false
3,520
py
""" Tests for the :module`regression_tests.parsers.c_parser.stmts.if_stmt` module. """ from tests.parsers.c_parser import WithModuleTests from regression_tests.parsers.c_parser.stmts.if_stmt import IfStmt class IfStmtTests(WithModuleTests): """Tests for `IfStmt`.""" def get_if_stmt(self, code, func_name): """Returns the first if statement in the given code.""" func = self.get_func(""" void %s(void) { %s } """ % (func_name, code), func_name) return func.if_stmts[0] def test_if_stmt_is_if_stmt(self): if_stmt = self.get_if_stmt("if(1) bar();", 'foo') self.assertTrue(if_stmt.is_if_stmt()) def test_if_stmt_is_no_other_kind_of_statement(self): if_stmt = self.get_if_stmt("if(1) bar();", 'foo') self.assertFalse(if_stmt.is_for_loop()) self.assertFalse(if_stmt.is_assign()) self.assertFalse(if_stmt.is_var_def()) self.assertFalse(if_stmt.is_while_loop()) self.assertFalse(if_stmt.is_return_stmt()) self.assertFalse(if_stmt.is_empty_stmt()) self.assertFalse(if_stmt.is_break_stmt()) self.assertFalse(if_stmt.is_continue_stmt()) self.assertFalse(if_stmt.is_switch_stmt()) self.assertFalse(if_stmt.is_goto_stmt()) self.assertFalse(if_stmt.is_do_while_loop()) self.assertFalse(if_stmt.is_loop()) def test_identification_returns_correct_value(self): if_stmt = self.get_if_stmt("if(1) bar();", 'foo') self.assertEqual(if_stmt.identification, 'if(1)') def test_correct_condition_is_extracted(self): if_stmt = self.get_if_stmt("if(1) bar();", 'foo') self.assertEqual(if_stmt.condition, '1') def test_if_stmt_without_else_part_does_not_have_else_part(self): if_stmt = self.get_if_stmt("if(1) bar();", 'foo') self.assertFalse(if_stmt.has_else_clause()) def test_if_stmt_with_else_part_has_else_part(self): if_stmt = self.get_if_stmt(""" if(1) bar(); else foo(); """, 'foo') self.assertTrue(if_stmt.has_else_clause()) def test_if_stmt_is_equal_to_itself(self): if_stmt = self.get_if_stmt("if(1) bar();", 'foo') self.assertEqual(if_stmt, if_stmt) def test_two_different_if_stmts_are_not_equal(self): if_stmt1 = self.get_if_stmt("if(1) bar();", 'foo') if_stmt2 = self.get_if_stmt("if(1) foo();", 'foo') self.assertNotEqual(if_stmt1, if_stmt2) def test_two_if_stmts_with_same_string_representation_are_not_equal(self): if_stmt1 = self.get_if_stmt("if(1) foo();", 'foo') if_stmt2 = self.get_if_stmt("if(1) foo();", 'bar') self.assertNotEqual(if_stmt1, if_stmt2) def test_else_if_statement_is_new_if_statement_in_else_clause(self): parent_if_stmt = self.get_if_stmt(""" if(1) { bar(); } else if (2) { foo(); } """, 'foo') child_if_stmt = IfStmt(list(parent_if_stmt._node.get_children())[2]) self.assertEqual(child_if_stmt.condition, '2') self.assertFalse(child_if_stmt.has_else_clause()) def test_repr_returns_correct_repr(self): if_stmt = self.get_if_stmt("if(1) foo();", 'foo') self.assertEqual(repr(if_stmt), '<IfStmt condition=1>') def test_str_returns_correct_str(self): if_stmt = self.get_if_stmt("if(1) foo();", 'foo') self.assertEqual(str(if_stmt), 'if (1)')
[ "petr.zemek@avast.com" ]
petr.zemek@avast.com
a922c484299fcb82e4c30019d0fbefc8983bd0d4
1527d341ec0910426ffede6207232f885b3176a0
/source/HwSendEmail.py
8f481d74c8c323f21344e6c81f02ade8d73cdf80
[]
no_license
eddiewang-wgq/HwUnittestFrameworkPy2
47f55c56c3e2c61aa153beb9180fa8247164fdcc
dada7db244f66830ca5a06087822f0b6db6ee512
refs/heads/master
2023-03-28T14:55:54.086200
2021-03-30T08:34:25
2021-03-30T08:34:25
352,928,215
0
0
null
null
null
null
GB18030
Python
false
false
4,032
py
#!/usr/bin/env python
# coding:gbk
# Created by zhaohongwei on 2016-06-20
# Blog: http://blog.csdn.net/z_johnny
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.utils import COMMASPACE
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.audio import MIMEAudio
import smtplib
import os
import yaml


class HwSendEmail(object):

    def __init__(self, email_title, email_content):
        """ init config """
        with open('./config/configEmail.yaml', 'rb') as config:
            self.allConfig = yaml.load(config)
        self.attachment_path = './result'
        self.email_title = email_title
        self.email_content = email_content
        self.smtp = smtplib.SMTP()
        self.login_username = self.allConfig['SMTP']['login_username']
        self.login_password = self.allConfig['SMTP']['login_password']
        self.sender = self.allConfig['SMTP']['login_username']
        self.receiver = self.allConfig['SMTP']['receiver']
        self.host = self.allConfig['SMTP']['host']
        # self.port = self.allConfig['SMTP']['port']
        # Sending was sometimes delayed when the port was given explicitly, so it is disabled for now.

    def connect(self):
        """ connect server """
        # self.smtp.connect(self.host, self.port)
        self.smtp.connect(self.host)

    def login(self):
        """ login email """
        try:
            self.smtp.login(self.login_username, self.login_password)
        except:
            raise AttributeError('Can not login smtp!!!')

    def send(self):
        """ send email """
        msg = MIMEMultipart()  # create MIMEMultipart
        msg['From'] = self.sender  # sender
        receiver = self.receiver.split(",")  # split receiver so the mail can go to more than one user
        msg['To'] = COMMASPACE.join(receiver)
        msg['Subject'] = self.email_title  # email subject
        # add email content; encoding is gbk because the text contains Chinese
        content = MIMEText(self.email_content, _charset='gbk')
        msg.attach(content)

        for attachment_name in os.listdir(self.attachment_path):
            attachment_file = os.path.join(self.attachment_path, attachment_name)
            with open(attachment_file, 'rb') as attachment:
                # NOTE: these literal comparisons are always False, so every
                # attachment currently falls through to MIMEApplication.
                if 'application' == 'text':
                    attachment = MIMEText(attachment.read(), _subtype='octet-stream', _charset='GB2312')
                elif 'application' == 'image':
                    attachment = MIMEImage(attachment.read(), _subtype='octet-stream')
                elif 'application' == 'audio':
                    attachment = MIMEAudio(attachment.read(), _subtype='octet-stream')
                else:
                    attachment = MIMEApplication(attachment.read(), _subtype='octet-stream')
                # make sure a Chinese attachment_name is encoded correctly
                attachment.add_header('Content-Disposition', 'attachment', filename=('gbk', '', attachment_name))
                msg.attach(attachment)

        self.smtp.sendmail(self.sender, receiver, msg.as_string())  # format msg.as_string()

    def quit(self):
        self.smtp.quit()

    def sendemail(self):
        self.connect()
        self.login()
        self.send()
        self.quit()


if __name__ == "__main__":
    import time
    ISOTIMEFORMAT = '_%Y-%m-%d_%A'
    current_time = str(time.strftime(ISOTIMEFORMAT))
    # e.g. "johnny test_2016-06-20_Monday"; adding the time makes each run's title unique
    email_title = 'johnny test' + '%s' % current_time
    email_content = 'python发送邮件测试,包含附件'
    # The constructor only takes the title and content; the config path
    # ('./config/configEmail.yaml') and attachment path ('./result') are fixed inside the class.
    myemail = HwSendEmail(email_title, email_content)
    myemail.connect()
    myemail.login()
    myemail.send()
    myemail.quit()
[ "2568080700@qq.com" ]
2568080700@qq.com
4ed17d17fad5aa10e9004e2cd3e4b71e0b4eaa7f
e10c8dbd03117dcf71ae4c5e59863b9268cda514
/store/migrations/0015_auto_20200617_2230.py
cab6aa810e8b6c38461a407bd24f100c01c4f615
[]
no_license
linker10/pharmacy
c305eb8304057498ea06008f43715db682e88554
8cd30ca6f94f636f45400899f4a9f1c150af3bbf
refs/heads/master
2022-12-10T01:04:45.154055
2020-08-12T18:32:06
2020-08-12T18:32:06
276,040,272
0
0
null
null
null
null
UTF-8
Python
false
false
552
py
# Generated by Django 3.0.6 on 2020-06-17 17:30 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('store', '0014_auto_20200617_2227'), ] operations = [ migrations.AlterField( model_name='item', name='old_price', field=models.FloatField(blank=True, default=0, null=True), ), migrations.AlterField( model_name='item', name='price', field=models.FloatField(default=0), ), ]
[ "bilalsharif4@gmail.com" ]
bilalsharif4@gmail.com
b42a88f8b03d74ac056c53e343c01339be04c77e
692f77a160798b586f3ef1240c1bdf2bb114c9a0
/aiopening/__init__.py
1bd330e0d468a5d48aabf994e80d57f1a099b472
[ "MIT" ]
permissive
ducandu/aiopening
5e16f8240a527da37d622b5445b68083d4fba1e4
214d8d6dfc928ab4f8db634018092dc43eaf0e3c
refs/heads/master
2022-12-30T14:58:45.669350
2017-09-06T09:47:57
2017-09-06T09:47:57
93,327,068
0
0
null
null
null
null
UTF-8
Python
false
false
677
py
""" --------------------------------------------------------------------------------- shine - [s]erver [h]osted [i]ntelligent [n]eural-net [e]nvironment :) --------------------------------------------------------------------------------- by Code Sourcerer (c) 2017 ducandu GmbH """ # "global" classes (that should live in the ai. namespace directly) from aiopening.labs import Lab from aiopening.experiments import Experiment from aiopening.models import Model # make sure these are available without having to specify them as separate imports import aiopening.modules import aiopening.envs import aiopening.algorithms # global pack vars _VERSION = 1 # 00.00.01 = 1
[ "svenmika1977@gmail.com" ]
svenmika1977@gmail.com
ec9155714dc595fe99a631eee20e6a23e915fb67
ef821468b081ef2a0b81bf08596a2c81e1c1ef1a
/Python OOP/Decorators-LAB/Vowel_Filter.py
02cd267e9c2e4dadb5ddf5db5dd6fce9504be66b
[]
no_license
Ivaylo-Atanasov93/The-Learning-Process
71db22cd79f6d961b9852f140f4285ef7820dd80
354844e2c686335345f6a54b3af86b78541ed3f3
refs/heads/master
2023-03-30T20:59:34.304207
2021-03-29T15:23:05
2021-03-29T15:23:05
294,181,544
0
0
null
null
null
null
UTF-8
Python
false
false
324
py
def vowel_filter(function):
    def wrapper():
        vowels = ['a', 'o', 'u', 'e', 'i', 'y']
        result = function()
        result = [letter for letter in result if letter in vowels]
        return result
    return wrapper


@vowel_filter
def get_letters():
    return ["a", "b", "c", "d", "e"]


print(get_letters())
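The same decorator pattern generalizes with one more level of nesting if the allowed letters should be configurable; a minimal standalone sketch (the name letter_filter and the sample function are made up for illustration):

def letter_filter(allowed):
    """Decorator factory: keep only the letters listed in `allowed`."""
    def decorator(function):
        def wrapper():
            return [letter for letter in function() if letter in allowed]
        return wrapper
    return decorator


@letter_filter(['a', 'e'])
def get_some_letters():
    return ["a", "b", "c", "d", "e"]


print(get_some_letters())  # ['a', 'e']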
[ "ivailo.atanasov93@gmail.com" ]
ivailo.atanasov93@gmail.com
a0f94082909743fec98edbe78c3ed3b4b1dcec26
37fe0d74375527f4aaf86857e17b96b675837205
/aid1805/MongoDB/grid.py
9f413334eb1c346fa4c34264e5f642ac4adafd87
[]
no_license
wangleiliugang/data
af8255eb76affa55424979c809c6168a7f3995ea
375a58b454be38ffa156876a7770f8d6f4345aba
refs/heads/master
2023-06-05T13:21:43.630854
2021-06-21T09:04:39
2021-06-21T09:04:39
378,862,494
0
0
null
null
null
null
UTF-8
Python
false
false
831
py
# Fetch the GridFS files stored in the database
from pymongo import MongoClient  # gridfs is bundled together with the pymongo module
import gridfs

# 1. create the mongo connection object
conn = MongoClient('localhost', 27017)
# 2. the database mygrid is created automatically if it does not exist
db = conn.mygrid
# 3. get the gridfs object
fs = gridfs.GridFS(db)
# 4. get an iterable of the stored files
files = fs.find()
# print(files)
# print(files.count())
# files is iterable; each item represents one stored file, and the file's
# information can be read from the object's attributes
# for file in files:
#     print(file.filename)
for file in files:
    with open(file.filename, 'wb') as f:
        while True:
            # the file object exposes a read interface, so content can be read straight from the database
            data = file.read(2048)
            if not data:
                break
            f.write(data)
conn.close()
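For the opposite direction, the same GridFS object can store a local file; a minimal sketch assuming the connection objects above and a local file named sample.bin (the filename is made up):

# Store a local file into GridFS and read it back by its id.
with open('sample.bin', 'rb') as f:
    file_id = fs.put(f, filename='sample.bin')   # returns the ObjectId of the new GridFS file

stored = fs.get(file_id)                          # GridOut object
print(stored.filename, stored.length)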
[ "137753633@qq.com" ]
137753633@qq.com
ac4e06fa4a67e9af197caba2daa92a5f5e08fb37
5edd3d54b9fb7ef685d7760e03391307374dee73
/web_flask/100-hbnb.py
de39873740c50d7b60a5c758d756af7a171a7bcf
[]
no_license
PierreBeaujuge/AirBnB_clone_v2
3df331aea025f8b216a705bd66bd5203a3b34ec9
910d04c08a5f833cd71754a62e74e3b81c601ba2
refs/heads/master
2020-11-23T21:30:55.362761
2020-10-08T07:37:22
2020-10-08T07:37:22
227,829,165
0
3
null
2019-12-20T09:10:49
2019-12-13T11:52:51
Python
UTF-8
Python
false
false
767
py
#!/usr/bin/python3
"""
Script that starts a Flask web application
"""
from flask import Flask
from flask import render_template
from models import storage

app = Flask(__name__)


@app.route('/hbnb', strict_slashes=False)
def hbnb():
    """view function that displays [...]"""
    all_states = storage.all("State").values()
    all_amenities = storage.all("Amenity").values()
    all_places = storage.all("Place").values()
    return render_template('100-hbnb.html',
                           all_states=all_states,
                           all_amenities=all_amenities,
                           all_places=all_places)


@app.teardown_appcontext
def teardown(self):
    """function that removes the current SQLAlchemy Session"""
    storage.close()


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5000)
[ "pierre.beaujuge@gmail.com" ]
pierre.beaujuge@gmail.com
0d0195538784ac551b4ef042046c3d82a141aaf8
2775a8306052e727b9a602c7906e64ee44cb4d80
/dictionaria/scripts/initializedb.py
6c46022df940c053822222cf7ed1fffad039bcb9
[ "Apache-2.0" ]
permissive
pombredanne/dictionaria
f40b45adb93b0733d1c047c338e15e834a2aa6b3
9668129e9b856fc5e8e78e15dacb1037621cbeb6
refs/heads/master
2021-01-14T14:16:39.221190
2015-11-25T22:22:06
2015-11-25T22:22:06
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,846
py
from __future__ import unicode_literals from datetime import date import transaction from nameparser import HumanName from sqlalchemy.orm import joinedload_all, joinedload from clldutils.misc import slug from clld.util import LGR_ABBRS from clld.scripts.util import Data, initializedb from clld.db.meta import DBSession from clld.db.models import common from clldclient.concepticon import Concepticon from clld_glottologfamily_plugin.util import load_families import dictionaria from dictionaria.models import ComparisonMeaning, Dictionary, Word, Variety from dictionaria.lib.submission import REPOS, Submission def main(args): data = Data() dataset = common.Dataset( id=dictionaria.__name__, name="Dictionaria", description="The Dictionary Journal", published=date(2015, 10, 1), contact='dictionaria@eva.mpg.de', domain='dictionaria.clld.org', license="http://creativecommons.org/licenses/by/4.0/", jsondata={ 'license_icon': 'cc-by.png', 'license_name': 'Creative Commons Attribution 4.0 International License'}) ed = data.add( common.Contributor, 'hartmanniren', id='hartmanniren', name='Iren Hartmann') common.Editor(dataset=dataset, contributor=ed) DBSession.add(dataset) for id_, name in LGR_ABBRS.items(): DBSession.add(common.GlossAbbreviation(id=id_, name=name)) comparison_meanings = {} comparison_meanings_alt_labels = {} print('loading concepts ...') concepticon = Concepticon() for i, concept_set in enumerate(concepticon.resources('parameter').members): concept_set = concepticon.resource(concept_set) cm = ComparisonMeaning( id=concept_set.id, name=concept_set.name.lower(), description=concept_set.description, concepticon_url='%s' % concept_set.uriref) DBSession.add(cm) comparison_meanings[cm.name] = cm for label in concept_set.alt_labels: comparison_meanings_alt_labels.setdefault(label.lower(), cm) DBSession.flush() print('... done') comparison_meanings = {k: v.pk for k, v in comparison_meanings.items()} comparison_meanings_alt_labels = { k: v.pk for k, v in comparison_meanings_alt_labels.items()} submissions = [] for submission in REPOS.joinpath('submissions').glob('*'): if not submission.is_dir(): continue try: submission = Submission(submission) except ValueError: continue md = submission.md id_ = submission.id lmd = md['language'] language = data['Variety'].get(lmd['glottocode']) if not language: language = data.add( Variety, lmd['glottocode'], id=lmd['glottocode'], name=lmd['name']) dictionary = data.add( Dictionary, id_, id=id_, name=lmd['name'] + ' Dictionary', language=language, published=date(*map(int, md['published'].split('-')))) for i, cname in enumerate(md['authors']): name = HumanName(cname) cid = slug('%s%s' % (name.last, name.first)) contrib = data['Contributor'].get(cid) if not contrib: contrib = data.add(common.Contributor, cid, id=cid, name=cname) DBSession.add(common.ContributionContributor( ord=i + 1, primary=True, contributor=contrib, contribution=dictionary)) submissions.append((dictionary.id, language.id, submission)) transaction.commit() for did, lid, submission in submissions: try: mod = __import__( 'dictionaria.loader.' + submission.id, fromlist=['MARKER_MAP']) marker_map = mod.MARKER_MAP except ImportError: marker_map = {} transaction.begin() print('loading %s ...' % submission.id) submission.load( did, lid, comparison_meanings, comparison_meanings_alt_labels, marker_map) transaction.commit() print('... 
done') #('hoocak', 'Hooca\u0328k', 43.5, -88.5, [('hartmanniren', 'Iren Hartmann')]), #('yakkha', 'Yakkha', 27.37, 87.93, [('schackowdiana', 'Diana Schackow')]), #('palula', 'Palula', 35.51, 71.84, [('liljegrenhenrik', 'Henrik Liljegren')], {}), #('daakaka', 'Daakaka', -16.27, 168.01, [('vonprincekilu', 'Kilu von Prince')], # {'published': date(2015, 9, 30), 'iso': 'bpa', 'glottocode': 'daka1243'}), #('teop', 'Teop', -5.67, 154.97, [('moselulrike', 'Ulrike Mosel')], # {'published': date(2015, 9, 30), 'iso': 'tio', 'glottocode': 'teop1238', 'encoding': 'latin1'}), transaction.begin() load_families(Data(), DBSession.query(Variety)) def prime_cache(cfg): """If data needs to be denormalized for lookup, do that here. This procedure should be separate from the db initialization, because it will have to be run periodiucally whenever data has been updated. """ for meaning in DBSession.query(ComparisonMeaning).options( joinedload_all(common.Parameter.valuesets, common.ValueSet.values) ): meaning.representation = sum([len(vs.values) for vs in meaning.valuesets]) if meaning.representation == 0: meaning.active = False for word in DBSession.query(Word).options(joinedload(Word.meanings)): word.description = ' / '.join(m.name for m in word.meanings if m.language == 'en') for d in DBSession.query(Dictionary).options(joinedload(Dictionary.words)): d.count_words = len(d.words) if __name__ == '__main__': initializedb(create=main, prime_cache=prime_cache)
[ "xrotwang@googlemail.com" ]
xrotwang@googlemail.com
cb325b725cf13cd0684e1a897ad69aa6f2113cf7
f84624d2f04c730e411e265e0a2fd97b6cfe6107
/anomaly_detection/CIFAR/run_cifar_dec.py
01915d1b0e3b643c119627d251a7659982825755
[ "Apache-2.0" ]
permissive
thu-spmi/Inclusive-NRF
1326cac36140f71bc05f4f71cd35a6024a97b394
e4e6ae6edca8f8d11a51f649609a8f7675d22f99
refs/heads/main
2023-01-20T01:22:34.444134
2020-11-21T13:03:02
2020-11-21T13:03:02
314,812,840
3
0
null
null
null
null
UTF-8
Python
false
false
2,386
py
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--lrd', type=float, default=1e-3)
parser.add_argument('--lrg', type=float, default=1e-3)
parser.add_argument('--loss', type=str, default='hinge')
parser.add_argument('--gpu', default='0', type=str)
parser.add_argument('--opt', type=str, default='rms')
parser.add_argument('--gw', default=1.0, type=float)
parser.add_argument('--L', default=10, type=int)
parser.add_argument('--fxp', default=0.1, type=float)
parser.add_argument('--del_we', default=1, type=float)
parser.add_argument('--max_e', default=100, type=int)
parser.add_argument('--alpha', default=0, type=float)
parser.add_argument('--eta', default=0.03, type=float)
parser.add_argument('--sf', type=str, default='')
parser.add_argument('--load', type=str, default='')
parser.add_argument('--cof', default=0, type=float)
parser.add_argument('--sig', default=0, type=float)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--method', type=int, default=1)
parser.add_argument('--no', type=str, default='0')  # use only the given label(s) of the training set
args = parser.parse_args()
print(args)

if __name__ == '__main__':
    import pickle
    import numpy as np
    import os, sys
    import cifar_dec
    if not os.path.exists('cifar_result'):
        os.mkdir('cifar_result')
    if args.method == 1:
        # run the experiment for one given label and one seed
        args.no = int(args.no)
        best_score = cifar_dec.main(args.no, args.seed, args)
        print("num:", args.no, "seed:", args.seed, "best:", best_score)
        sys.stdout.flush()
    elif args.method == 2:
        # run the experiment for a given set of labels with seeds 1-10
        if os.path.exists('cifar_result/cifar_nrf_dec_%s' % args.sf):
            b_s = pickle.load(open('cifar_result/cifar_nrf_dec_%s' % args.sf, 'rb'))
        else:
            b_s = np.zeros((10, 10))
        num_all = [int(num) for num in args.no.split(',')]
        for num in num_all:
            for seed in range(1, 11):
                best_score = cifar_dec.main(num, seed, args)
                print("num:", num, "seed:", seed, "best:", best_score)
                sys.stdout.flush()
                b_s[num, seed - 1] = best_score
                print(b_s)
                sys.stdout.flush()
                pickle.dump(b_s, open('cifar_result/cifar_nrf_dec_%s' % args.sf, 'wb'))
        print(np.mean(b_s, 1))
[ "maxwellzh@outlook.com" ]
maxwellzh@outlook.com
093dc3c5b815c2667c195e70c4bc2fd0a494f163
fb909b0716f62ae118afa7d505cbcbd28f62bc63
/main/migrations/0077_auto_20201010_0437.py
8b774df3242d4e5b429aa27ceafc4c8b4dc7bc60
[]
no_license
dkalola/JustAsk-Final
a5b951462cd3c88eb84320bb8fcf10c32f959090
c2e7c2ffae4d3c2d870d5ba5348a6bae62db5319
refs/heads/main
2023-05-24T16:02:17.425251
2021-06-16T19:33:52
2021-06-16T19:33:52
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,885
py
# Generated by Django 3.1.1 on 2020-10-10 04:37 import datetime from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('main', '0076_auto_20201010_0435'), ] operations = [ migrations.AlterField( model_name='buybook', name='EndDate', field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 10, 10, 4, 37, 18, 340261), null=True, verbose_name='End Date of Rental book'), ), migrations.AlterField( model_name='buybook', name='StartDate', field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 10, 10, 4, 37, 18, 340234), null=True, verbose_name='Start Date Rental book'), ), migrations.AlterField( model_name='paper', name='Date', field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 10, 10, 4, 37, 18, 339646), null=True, verbose_name='Date Of Paper'), ), migrations.AlterField( model_name='question', name='qid', field=models.CharField(default='NEPGMHEF', max_length=8, unique=True, verbose_name='Question ID'), ), migrations.AlterField( model_name='student', name='EndDate', field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 10, 10, 4, 37, 18, 330236), null=True, verbose_name='End Date of Subscription'), ), migrations.AlterField( model_name='student', name='StartDate', field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 10, 10, 4, 37, 18, 330199), null=True, verbose_name='Start Date of Subscription'), ), ]
[ "divyanshukalola88@gmail.com" ]
divyanshukalola88@gmail.com
b29b703f1530373509e91bc7c4ff79b5dd754d1a
71e43068e82c91acbb3849169d1723f1375ac27f
/talon_one/models/feature_flag.py
a4e731d55e6fae5e31d2af2b4a4e26d311e5dfcb
[ "MIT" ]
permissive
talon-one/talon_one.py
aa08a1dbddd8ea324846ae022e43d441c57028f6
917dffb010e3d3e2f841be9cccba5bba1ea6c5c3
refs/heads/master
2023-05-11T18:50:00.041890
2023-05-03T20:17:39
2023-05-03T20:17:39
79,575,913
1
7
MIT
2023-05-03T15:10:14
2017-01-20T16:29:46
Python
UTF-8
Python
false
false
6,527
py
# coding: utf-8 """ Talon.One API Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you access the Campaign Manager at `https://yourbaseurl.talon.one/`, the URL for the [updateCustomerSessionV2](https://docs.talon.one/integration-api#operation/updateCustomerSessionV2) endpoint is `https://yourbaseurl.talon.one/v2/customer_sessions/{Id}` # noqa: E501 The version of the OpenAPI document: Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from talon_one.configuration import Configuration class FeatureFlag(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'name': 'str', 'value': 'str', 'created': 'datetime', 'modified': 'datetime' } attribute_map = { 'name': 'name', 'value': 'value', 'created': 'created', 'modified': 'modified' } def __init__(self, name=None, value=None, created=None, modified=None, local_vars_configuration=None): # noqa: E501 """FeatureFlag - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._name = None self._value = None self._created = None self._modified = None self.discriminator = None self.name = name self.value = value if created is not None: self.created = created if modified is not None: self.modified = modified @property def name(self): """Gets the name of this FeatureFlag. # noqa: E501 The name of the feature flag. # noqa: E501 :return: The name of this FeatureFlag. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this FeatureFlag. The name of the feature flag. # noqa: E501 :param name: The name of this FeatureFlag. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501 raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501 self._name = name @property def value(self): """Gets the value of this FeatureFlag. # noqa: E501 The value of the feature flag. # noqa: E501 :return: The value of this FeatureFlag. # noqa: E501 :rtype: str """ return self._value @value.setter def value(self, value): """Sets the value of this FeatureFlag. The value of the feature flag. # noqa: E501 :param value: The value of this FeatureFlag. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501 raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501 self._value = value @property def created(self): """Gets the created of this FeatureFlag. # noqa: E501 The time this entity was last created. # noqa: E501 :return: The created of this FeatureFlag. # noqa: E501 :rtype: datetime """ return self._created @created.setter def created(self, created): """Sets the created of this FeatureFlag. 
The time this entity was last created. # noqa: E501 :param created: The created of this FeatureFlag. # noqa: E501 :type: datetime """ self._created = created @property def modified(self): """Gets the modified of this FeatureFlag. # noqa: E501 The time this entity was last modified. # noqa: E501 :return: The modified of this FeatureFlag. # noqa: E501 :rtype: datetime """ return self._modified @modified.setter def modified(self, modified): """Sets the modified of this FeatureFlag. The time this entity was last modified. # noqa: E501 :param modified: The modified of this FeatureFlag. # noqa: E501 :type: datetime """ self._modified = modified def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, FeatureFlag): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, FeatureFlag): return True return self.to_dict() != other.to_dict()
[ "noreply@github.com" ]
talon-one.noreply@github.com
278e1903f4c212e40feb622879c936eec4220cdb
6ee1a53f2da154008a79df40a5a2cb9f77d36409
/study/threading_learn.py
8f168445e1d388a3755b4c721ff677314e04f653
[]
no_license
zuoguagua/demo
939f219c29cf0eae6c08c96cd578f1e566819243
504a551a5153848a9a173d9925132caea9806c25
refs/heads/master
2021-01-18T15:06:13.667246
2016-01-18T09:09:41
2016-01-18T09:09:41
44,574,876
0
0
null
null
null
null
UTF-8
Python
false
false
1,393
py
#!/usr/bin/env python
import logging
import Queue
import threading

def func_a(a,b):
    return a+b

def func_b():
    pass

def func_c(a,b,c):
    return a,b,c

_task_queue = Queue.Queue()

def async_call(function,callback,*args,**kwargs):
    _task_queue.put({
        'function':function,
        'callback':callback,
        'args':args,
        'kwargs':kwargs
    })

def _task_queue_consumer():
    while True:
        try:
            task = _task_queue.get()
            function = task.get('function')
            callback = task.get('callback')
            args = task.get('args')
            kwargs = task.get('kwargs')
            try:
                if callback:
                    callback(function(*args,**kwargs))
            except Exception as ex:
                if callback:
                    callback(ex)
            finally:
                _task_queue.task_done()
        except Exception as ex:
            logging.warning(ex)

def handle_result(result):
    print(type(result),result)

if __name__ == "__main__":
    t = threading.Thread(target=_task_queue_consumer)
    t.daemon = True
    t.start()
    async_call(func_a,handle_result,1,2)
    async_call(func_b,handle_result)
    async_call(func_c,handle_result,1,2,3)
    async_call(func_c,handle_result,1,2,3,4)
    _task_queue.join()
[ "root@localhost.localdomain" ]
root@localhost.localdomain
c558f48d172a81752e398f010623632a3e38e65e
3bdcb60b0bffeeb6ff7b0ddca4792b682158bb12
/4.2.9-AnidamientoDeEstructuras.py
8a1e89b2e21ae68af8c778aa5476bdd7cc891382
[]
no_license
FrankCasanova/Python
03c811801ec8ecd5ace66914f984a94f12befe06
03f15100991724a49437df3ce704837812173fc5
refs/heads/master
2023-05-23T01:37:12.632204
2021-06-10T15:20:38
2021-06-10T15:20:38
278,167,039
0
0
null
null
null
null
UTF-8
Python
false
false
273
py
# ejemplo
limit = int(input('Dame un número: '))

for num in range(2, limit):
    primo = True
    for divisor in range(2, num):
        if num % divisor == 0:
            primo = False
            break
    if primo:
        print('el número {0} es primo'.format(num))
[ "frankcasanova.info@gmail.comm" ]
frankcasanova.info@gmail.comm
c7dcaec059d5297db4608373da3174aaf6d96ac2
7343194126b632ff5ac76fa3291de9ecf5b53e38
/lib/carbon/tests/benchmark_routers.py
cc29ac29d0e725f8de9bab5434a58880bc3a0df7
[ "Apache-2.0" ]
permissive
zillow/carbon
a885f226347d66cebe8dda33573a1efbc44e3078
07244f98e8ddf305a0b2cc2da1bcc1a86b613ce6
refs/heads/master
2020-12-26T00:46:14.220907
2019-10-09T02:23:40
2019-10-09T02:23:40
46,576,478
0
0
Apache-2.0
2019-10-09T02:23:41
2015-11-20T17:24:21
Python
UTF-8
Python
false
false
2,476
py
import os import timeit from carbon.routers import DatapointRouter from test_routers import createSettings REPLICATION_FACTORS = [1, 4] DIVERSE_REPLICAS = [True, False] N_DESTINATIONS = [1, 16, 32, 48] def print_stats(r, t): usec = t * 1e6 msec = usec / 1000 text = " %s %s datapoints: %d" % (r.plugin_name, r.__id, r.__count) if usec < 1000: text += " usecs: %d" % int(usec) elif msec < 1000: text += " msecs: %d" % int(msec) else: sec = msec / 1000 text += " secs: %3g" % sec print text def generateDestinations(n): for i in xrange(n): host_id = i % 10 instance_id = i port = 2000 + i yield ('carbon%d' % host_id, port, instance_id) def benchmark(router_class): for replication_factor in REPLICATION_FACTORS: for diverse_replicas in DIVERSE_REPLICAS: for n_destinations in N_DESTINATIONS: destinations = list(generateDestinations(n_destinations)) settings = createSettings() settings['REPLICATION_FACTOR'] = replication_factor settings['DIVERSE_REPLICAS'] = diverse_replicas settings['DESTINATIONS'] = destinations router = router_class(settings) router.__count = 0 # Ugly hack for timeit ! router.__id = ( ' deplication_factor: %d' % replication_factor + ' diverse_replicas: %d' % diverse_replicas + ' n_destinations: %-5d' % n_destinations) settings.DESTINATIONS = [] for destination in destinations: router.addDestination(destination) settings.DESTINATIONS.append( '%s:%s:%s' % ( destination[0], destination[1], destination[2])) benchmark_router(router) def benchmark_router(router): def router_getDestinations(): router.__count += 1 dst = list(router.getDestinations('foo.%d' % router.__count)) assert(len(dst) != 0) n = 100000 t = timeit.timeit(router_getDestinations, number=n) print_stats(router, t) def main(): for router_class in DatapointRouter.plugins.values(): # Skip 'rules' because it's hard to mock. if router_class.plugin_name == 'rules': continue benchmark(router_class) if __name__ == '__main__': main()
[ "c.chary@criteo.com" ]
c.chary@criteo.com
fbf9692e45b4994a8d39f8cbc34f41bf1bb692ae
d94b6845aeeb412aac6850b70e22628bc84d1d6d
/active_selective_prediction/sampling_methods/__init__.py
f6665d3dc1794f678e706514bb12be34006dff71
[ "CC-BY-4.0", "Apache-2.0" ]
permissive
ishine/google-research
541aea114a68ced68736340e037fc0f8257d1ea2
c1ae273841592fce4c993bf35cdd0a6424e73da4
refs/heads/master
2023-06-08T23:02:25.502203
2023-05-31T01:00:56
2023-05-31T01:06:45
242,478,569
0
0
Apache-2.0
2020-06-23T01:55:11
2020-02-23T07:59:42
Jupyter Notebook
UTF-8
Python
false
false
1,496
py
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Import sampling methods."""

from active_selective_prediction.sampling_methods.average_kl_divergence_sampling import AverageKLDivergenceSampling
from active_selective_prediction.sampling_methods.average_margin_sampling import AverageMarginSampling
from active_selective_prediction.sampling_methods.badge_sampling import BADGESampling
from active_selective_prediction.sampling_methods.clue_sampling import CLUESampling
from active_selective_prediction.sampling_methods.confidence_sampling import ConfidenceSampling
from active_selective_prediction.sampling_methods.entropy_sampling import EntropySampling
from active_selective_prediction.sampling_methods.kcenter_greedy_sampling import KCenterGreedySampling
from active_selective_prediction.sampling_methods.margin_sampling import MarginSampling
from active_selective_prediction.sampling_methods.uniform_sampling import UniformSampling
[ "copybara-worker@google.com" ]
copybara-worker@google.com
7c33a2cb3128b036c61416554cb4d258e4f256dd
740b88ae1307d159fb7f39c455a295155c94d58f
/main.py
a03d136942c1668a9b1e67eeb4a5d60b8dcd520b
[]
no_license
hosmanadam/quote-scraping-game
4ed578320d3c6872711dadc9f312b7acc6e071c0
5d4ecd5f3e2137127190971cfc4da6447f71ffc6
refs/heads/master
2020-04-03T10:05:27.242294
2019-05-14T11:08:47
2019-05-14T11:08:47
155,183,546
0
0
null
2018-10-31T23:48:51
2018-10-29T09:16:20
Python
UTF-8
Python
false
false
5,892
py
import os import unicodedata from csv import DictReader, DictWriter from random import choice from time import sleep import regex from pyfiglet import figlet_format from termcolor import colored import scraper import ui from classes.BadQuoteError import BadQuoteError PRINT_DELAY = 1 CRAWL_DELAY = 1 def essentialize(full_name): """Return the "essence" of a person's name, for fair comparison - strip whitespace, make lower case - remove any middle names - remove punctuation & accents (diacritical marks) Examples: (1) `' Emily Jane Brontë'` → `'emilybronte'` (2) `'J.R.R. Tolkien'` → `'jtolkien'` """ names = full_name.strip().lower().replace('.', '. ').split(' ') no_middle = names[0] if len(names) > 1: no_middle += names[-1] no_punctuation = ''.join(char for char in no_middle if char not in " '.-") no_accents = unicodedata.normalize('NFKD', no_punctuation).encode('ASCII', 'ignore').decode() return no_accents def is_fuzzy_match(a, b): """Return `True` if string `a` is "basically the same" as string `b`, else `False` - fuzzy string matching - allows 1 mistake for every 6 characters in `a`, but at least 1 - mistake may be insertion, deletion, or substitution """ fuzzlimit = round(len(a)/6) or 1 fuzzy = fr'(?:{b}){{i,d,s,e<={fuzzlimit}}}' return bool(regex.fullmatch(fuzzy, a)) def redact_author_description(author_description, author_name): """Return text with all appearences of author's name replaced with name-length blocks of `'█'`""" for name in author_name.split(' '): author_description = author_description.replace(name, '█'*len(name)) return author_description def _give_hint(i, quote): """Return `i`th hint for given quote.""" author_first = quote['author_name'].split(' ')[0] author_last = quote['author_name'].split(' ')[-1] author_description_redacted = redact_author_description(quote['author_description'], quote['author_name']) hints = [ colored("\nGuess who this quote is from", attrs=['underline']) + f":\n{ui.format_text_block(quote['text'])}", colored("Hint", attrs=['underline']) + f": the author was born on {quote['author_born_date']} {quote['author_born_location']}!", colored("Hint", attrs=['underline']) + f": the author's first name begins with the letter '{author_first[0]}'!", colored("Hint", attrs=['underline']) + f": the author's last name begins with the letter '{author_last[0]}'!", colored("Hint", attrs=['underline']) + f": here's some more stuff about the author...\n\n{ui.format_text_block(author_description_redacted)}\n", ] return hints[i] def _scrape_and_save(): quotes = scraper.get_quotes(crawl_delay=CRAWL_DELAY, crawl_stop=10) _save_to_csv(quotes) return quotes def _save_to_csv(quotes): with open('quotes.csv', 'w') as file: DW_object = DictWriter(file, fieldnames=quotes[0].keys()) DW_object.writeheader() DW_object.writerows(quotes) def _load_from_csv(): with open('quotes.csv') as file: DR_object = DictReader(file) return [row for row in DR_object] def _pick_quote(quotes): """Return random quote updated with author details, or `None` if details are N/A""" quote = quotes.pop(choice(range(len(quotes)))) try: quote.update(scraper.get_quote_details(quote['author_href'])) return quote, quotes except: sleep(CRAWL_DELAY) return None, quotes def ask_to_play(): """Ask user to play again, and return `True` or `False` depending on answer""" wants_to_play = input("\nWould you like to keep playing? 
(y/n) ") if not wants_to_play or wants_to_play[0].lower() not in 'yn': return ask_to_play() if wants_to_play[0].lower() == 'y': return True return False def enforce_working_directory(): """Sets working directory to the folder this .py file is in""" os.chdir(os.sys.path[0]) def play_round(quotes, total_guesses): """Selects a quote using _pick_quote(). Conducts a round of the game using _give_hint().""" quote = {} while not quote: quote, quotes = _pick_quote(quotes) os.system('clear') print(f"Number of remaining quotes: {len(quotes)}") sleep(PRINT_DELAY) for i in range(total_guesses): print(_give_hint(i, quote)) guess = input(colored("Your guess: ", attrs=['bold'])) if is_fuzzy_match(essentialize(guess), essentialize(quote['author_name'])): print(colored("\nYou win!", 'magenta', attrs=['bold'])) sleep(PRINT_DELAY) break elif i < total_guesses-1: print(f"\nThat's not the one. {total_guesses-1-i} guesses left!") else: print(colored("\nSorry, you lose!", 'red'), end='') sleep(PRINT_DELAY) print(f" (The author is {quote['author_name']}.)") sleep(PRINT_DELAY) return quotes def scrape_or_load(): """Scrape web for quotes or load them from CSV - scrape without asking if there's no CSV - user can choose otherwise """ if not os.path.exists('quotes.csv'): return _scrape_and_save() wants_to_scrape = input("Would you like to scrape the web to update your quotes before playing? (y/n) ") if not wants_to_scrape or wants_to_scrape[0].lower() not in 'yn': return scrape_or_load() if wants_to_scrape[0].lower() == 'y': return _scrape_and_save() if wants_to_scrape[0].lower() == 'n': return _load_from_csv() def main(): os.system('clear') print(colored((figlet_format("< Quote game \\>")), 'green', attrs=['bold'])) enforce_working_directory() quotes = scrape_or_load() total_guesses = 5 # max.5 unless more hints are added in _give_hint() wants_to_play = True while wants_to_play: quotes = play_round(quotes, total_guesses) if quotes: wants_to_play = ask_to_play() else: print(colored("\nALL OUT OF QUOTES.", attrs=['bold'])) break print(colored("\nThanks for playing. Bye!\n", attrs=['bold'])) sleep(PRINT_DELAY) if __name__ == '__main__': main()
[ "github@adamsinbox.com" ]
github@adamsinbox.com
c6a432ee79a97806ef61dc314d50de52ccaa5959
61004e474b7b2ad0071c16766f0f7874f04f9466
/examples/dataflow-production-ready/python/ml_preproc/pipeline/beam_classes/clean_records.py
455ff7e92e93510847b84dce6bfda5dffe4fa050
[ "Apache-2.0" ]
permissive
GoogleCloudPlatform/professional-services
eb79751efae765a8c691a745e520f44f51bd715c
0f51121b945bd74c7f667e74e8861fceda87565c
refs/heads/main
2023-09-05T02:57:33.328973
2023-08-30T14:40:30
2023-08-30T14:40:30
91,730,359
2,626
1,381
Apache-2.0
2023-09-14T20:13:42
2017-05-18T19:29:27
Python
UTF-8
Python
false
false
2,265
py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Iterable, Dict

from apache_beam import DoFn

from ..model.data_classes import Record
from ..features import clean_input


class CleanAndTransfToDictDoFn(DoFn):

    def __init__(self, *unused_args, **unused_kwargs):
        super().__init__(*unused_args, **unused_kwargs)

    def process(self, element: Record, abbrev: Dict) -> Iterable[Dict]:
        ## In this process method we are going to change element. But BEWARE: in Beam, the process method should not
        ## mutate the input object, it should produce a new object.
        ## Thankfully for us, named tuples (Record is a named tuple) are immutable; an AttributeError exception
        ## will be triggered if we try to modify element.
        ## So let's make a copy as a dict, and then we will return the dictionary.
        ##
        ## The transform to dictionary is necessary for two reasons:
        ## * We will need dicts to write to BigQuery
        ## * We are going to add some new columns/fields, with the similarity values

        # The _asdict method starts with _ to avoid potential conflicts with the named tuple field names
        # (its use is not restricted)
        mutable_element = element._asdict()

        ## source and target address
        mutable_element['source_address'] = clean_input.clean_text(element.source_address, abbrev)
        mutable_element['target_address'] = clean_input.clean_text(element.target_address, abbrev)

        ## source and target city
        mutable_element['source_city'] = clean_input.clean_text(element.source_city)
        mutable_element['target_city'] = clean_input.clean_text(element.target_city)

        # TODO: transform all the rest of fields

        yield mutable_element
[ "noreply@github.com" ]
GoogleCloudPlatform.noreply@github.com
0ddbb08c6b7a062673f7f83b9e2f349c32c73b77
8ebf6311c3c1db40c7bb56051cf4e37e1b85a4f9
/rm-server/templatemanager/templatemanager/mongodb.py
949427035f6d0032ee8be250602351099a125713
[]
no_license
sq591442679/requirements-manager
e8b074afb7fd2a83632f2546d392dab4c35aeeeb
6d664ce338b455150dcc9a86145967e8dd67a9dd
refs/heads/master
2023-07-08T04:38:20.064019
2021-08-11T03:41:13
2021-08-11T03:41:13
392,877,568
0
0
null
null
null
null
UTF-8
Python
false
false
234
py
import pymongo

from templatemanager.config import MONGODB_URL

client = pymongo.MongoClient(MONGODB_URL)
database = client['RequirementsManager']
template_collection = database['Template']
document_collection = database['Document']
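# --- Added illustration (not in the original file): a minimal usage sketch of the
# collection handles defined above. It assumes a MongoDB instance is reachable at
# MONGODB_URL; the {"name": "default"} filter is a hypothetical example value.
if __name__ == "__main__":
    example_template = template_collection.find_one({"name": "default"})  # hypothetical query
    print(example_template)
    print(document_collection.count_documents({}))  # total stored documents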
[ "591442679@qq.com" ]
591442679@qq.com
729fe63a21b3433191f4134946686e280a343e23
cb5b76716ac04f9bd2eefc2020d9dea7ae9f2123
/04判断年月日天数.py
d1e246e9be03df255643a395408e58acdd93f423
[]
no_license
chengong825/python-test
6c788e47c2ee71457b77d190759d73954489d1fb
e8ac085386eadb562a125cc4428cad9f7b312c3c
refs/heads/master
2020-03-29T13:17:50.411249
2018-10-26T04:40:42
2018-10-26T04:40:42
149,950,556
0
0
null
null
null
null
UTF-8
Python
false
false
1,295
py
y=int(input("输入年份")) m=int(input("输入月份")) d=int(input("输入日号")) def jug(y): if (y%4==0 and y%100 !=0)or(y%400==0): return 1 else: return 0 if jug(y)==1: if m==1: i=d elif m==2: i=31+d elif m==3: i=31+29+d elif m==4: i=31+29+31+d elif m==5: i=31+29+31+30+d elif m==6: i=31+29+31+30+31+d elif m==7: i=31+29+31+30+31+30+d elif m==8: i=31+29+31+30+31+30+31+d elif m==9: i=31+29+31+30+31+30+31+31+d elif m==10: i=31+29+31+30+31+30+31+31+30+d elif m==11: i=31+29+31+30+31+30+31+31+30+31+d elif m==12: i=31+29+31+30+31+30+31+31+30+31+30+d else: if m==1: i=d elif m==2: i=31+d elif m==3: i=31+28+d elif m==4: i=31+28+31+d elif m==5: i=31+28+31+30+d elif m==6: i=31+28+31+30+31+d elif m==7: i=31+28+31+30+31+30+d elif m==8: i=31+28+31+30+31+30+31+d elif m==9: i=31+28+31+30+31+30+31+31+d elif m==10: i=31+28+31+30+31+30+31+31+30+d elif m==11: i=31+28+31+30+31+30+31+31+30+31+d elif m==12: i=31+28+31+30+31+30+31+31+30+31+30+d print("这一天是这一年的第%d天"%i)
[ "252918372@qq.com" ]
252918372@qq.com
294cb258fe310c29e43889691c9291e31eea57cb
b2db386a35e167dd67d6de90d95c06d5c2ed91cd
/657_judgeCircle.py
de842edbcdbca60309bbad6d4b2c427565ed7dc1
[]
no_license
rohitmungre/leetcode
9edb1b8b0cd714eb1a5e1fa847f2e17c455fd624
d49836b2b46a980f073bb9a6f2e47c4a903e48ac
refs/heads/master
2020-08-07T16:55:38.699188
2020-03-12T11:00:13
2020-03-12T11:00:13
213,531,119
1
0
null
null
null
null
UTF-8
Python
false
false
504
py
class Solution(object):
    def judgeCircle(self, moves):
        """
        :type moves: str
        :rtype: bool
        """
        y = 0
        x = 0
        for item in moves:
            if item == 'U':
                y = y +1
            elif item == 'D':
                y = y -1
            elif item == 'L':
                x = x -1
            elif item == 'R':
                x = x +1
        if x==0 and y==0:
            return True
        return False
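# --- Added illustration (not in the original file): quick sanity checks using the
# standard LeetCode 657 examples.
if __name__ == "__main__":
    solution = Solution()
    print(solution.judgeCircle("UD"))  # expected: True (robot returns to origin)
    print(solution.judgeCircle("LL"))  # expected: False (robot ends two steps left)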
[ "noreply@github.com" ]
rohitmungre.noreply@github.com
fb25374076a68ad89abab966d09eacb305fd5bdf
a56252fda5c9e42eff04792c6e16e413ad51ba1a
/resources/usr/local/lib/python2.7/dist-packages/scipy/linalg/_solvers.py
a6de2d946676fb45fe7951ef9d4115fd340500bd
[ "Apache-2.0" ]
permissive
edawson/parliament2
4231e692565dbecf99d09148e75c00750e6797c4
2632aa3484ef64c9539c4885026b705b737f6d1e
refs/heads/master
2021-06-21T23:13:29.482239
2020-12-07T21:10:08
2020-12-07T21:10:08
150,246,745
0
0
Apache-2.0
2019-09-11T03:22:55
2018-09-25T10:21:03
Python
UTF-8
Python
false
false
7,241
py
"""Matrix equation solver routines""" # Author: Jeffrey Armstrong <jeff@approximatrix.com> # February 24, 2012 from __future__ import division, print_function, absolute_import import numpy as np from numpy.linalg import inv, LinAlgError from .basic import solve from .lapack import get_lapack_funcs from .decomp_schur import schur from .special_matrices import kron __all__ = ['solve_sylvester', 'solve_lyapunov', 'solve_discrete_lyapunov', 'solve_continuous_are', 'solve_discrete_are'] def solve_sylvester(a,b,q): """ Computes a solution (X) to the Sylvester equation (AX + XB = Q). .. versionadded:: 0.11.0 Parameters ---------- a : (M, M) array_like Leading matrix of the Sylvester equation b : (N, N) array_like Trailing matrix of the Sylvester equation q : (M, N) array_like Right-hand side Returns ------- x : (M, N) ndarray The solution to the Sylvester equation. Raises ------ LinAlgError If solution was not found Notes ----- Computes a solution to the Sylvester matrix equation via the Bartels- Stewart algorithm. The A and B matrices first undergo Schur decompositions. The resulting matrices are used to construct an alternative Sylvester equation (``RY + YS^T = F``) where the R and S matrices are in quasi-triangular form (or, when R, S or F are complex, triangular form). The simplified equation is then solved using ``*TRSYL`` from LAPACK directly. """ # Compute the Schur decomp form of a r,u = schur(a, output='real') # Compute the Schur decomp of b s,v = schur(b.conj().transpose(), output='real') # Construct f = u'*q*v f = np.dot(np.dot(u.conj().transpose(), q), v) # Call the Sylvester equation solver trsyl, = get_lapack_funcs(('trsyl',), (r,s,f)) if trsyl is None: raise RuntimeError('LAPACK implementation does not contain a proper Sylvester equation solver (TRSYL)') y, scale, info = trsyl(r, s, f, tranb='C') y = scale*y if info < 0: raise LinAlgError("Illegal value encountered in the %d term" % (-info,)) return np.dot(np.dot(u, y), v.conj().transpose()) def solve_lyapunov(a, q): """ Solves the continuous Lyapunov equation (AX + XA^H = Q) given the values of A and Q using the Bartels-Stewart algorithm. .. versionadded:: 0.11.0 Parameters ---------- a : array_like A square matrix q : array_like Right-hand side square matrix Returns ------- x : array_like Solution to the continuous Lyapunov equation See Also -------- solve_sylvester : computes the solution to the Sylvester equation Notes ----- Because the continuous Lyapunov equation is just a special form of the Sylvester equation, this solver relies entirely on solve_sylvester for a solution. """ return solve_sylvester(a, a.conj().transpose(), q) def solve_discrete_lyapunov(a, q): """ Solves the Discrete Lyapunov Equation (A'XA-X=-Q) directly. .. versionadded:: 0.11.0 Parameters ---------- a : (M, M) array_like A square matrix q : (M, M) array_like Right-hand side square matrix Returns ------- x : ndarray Solution to the continuous Lyapunov equation Notes ----- Algorithm is based on a direct analytical solution from: Hamilton, James D. Time Series Analysis, Princeton: Princeton University Press, 1994. 265. Print. http://www.scribd.com/doc/20577138/Hamilton-1994-Time-Series-Analysis """ lhs = kron(a, a.conj()) lhs = np.eye(lhs.shape[0]) - lhs x = solve(lhs, q.flatten()) return np.reshape(x, q.shape) def solve_continuous_are(a, b, q, r): """ Solves the continuous algebraic Riccati equation, or CARE, defined as (A'X + XA - XBR^-1B'X+Q=0) directly using a Schur decomposition method. .. 
versionadded:: 0.11.0 Parameters ---------- a : (M, M) array_like Input b : (M, N) array_like Input q : (M, M) array_like Input r : (N, N) array_like Non-singular, square matrix Returns ------- x : (M, M) ndarray Solution to the continuous algebraic Riccati equation See Also -------- solve_discrete_are : Solves the discrete algebraic Riccati equation Notes ----- Method taken from: Laub, "A Schur Method for Solving Algebraic Riccati Equations." U.S. Energy Research and Development Agency under contract ERDA-E(49-18)-2087. http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf """ try: g = inv(r) except LinAlgError: raise ValueError('Matrix R in the algebraic Riccati equation solver is ill-conditioned') g = np.dot(np.dot(b, g), b.conj().transpose()) z11 = a z12 = -1.0*g z21 = -1.0*q z22 = -1.0*a.conj().transpose() z = np.vstack((np.hstack((z11, z12)), np.hstack((z21, z22)))) # Note: we need to sort the upper left of s to have negative real parts, # while the lower right is positive real components (Laub, p. 7) [s, u, sorted] = schur(z, sort='lhp') (m, n) = u.shape u11 = u[0:m//2, 0:n//2] u21 = u[m//2:m, 0:n//2] u11i = inv(u11) return np.dot(u21, u11i) def solve_discrete_are(a, b, q, r): """ Solves the disctrete algebraic Riccati equation, or DARE, defined as (X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q), directly using a Schur decomposition method. .. versionadded:: 0.11.0 Parameters ---------- a : (M, M) array_like Non-singular, square matrix b : (M, N) array_like Input q : (M, M) array_like Input r : (N, N) array_like Non-singular, square matrix Returns ------- x : ndarray Solution to the continuous Lyapunov equation See Also -------- solve_continuous_are : Solves the continuous algebraic Riccati equation Notes ----- Method taken from: Laub, "A Schur Method for Solving Algebraic Riccati Equations." U.S. Energy Research and Development Agency under contract ERDA-E(49-18)-2087. http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf """ try: g = inv(r) except LinAlgError: raise ValueError('Matrix R in the algebraic Riccati equation solver is ill-conditioned') g = np.dot(np.dot(b, g), b.conj().transpose()) try: ait = inv(a).conj().transpose() # ait is "A inverse transpose" except LinAlgError: raise ValueError('Matrix A in the algebraic Riccati equation solver is ill-conditioned') z11 = a+np.dot(np.dot(g, ait), q) z12 = -1.0*np.dot(g, ait) z21 = -1.0*np.dot(ait, q) z22 = ait z = np.vstack((np.hstack((z11, z12)), np.hstack((z21, z22)))) # Note: we need to sort the upper left of s to lie within the unit circle, # while the lower right is outside (Laub, p. 7) [s, u, sorted] = schur(z, sort='iuc') (m,n) = u.shape u11 = u[0:m//2, 0:n//2] u21 = u[m//2:m, 0:n//2] u11i = inv(u11) return np.dot(u21, u11i)
[ "szarate@dnanexus.com" ]
szarate@dnanexus.com
a6102dc1cb29b2ad159b1a763872388f8957f2df
47ca35636ad56f7e3878777fbb85eac9aef32bf7
/initdjango/new_project/mysite/blog/feeds.py
862ba8077866af2a1677f8b26f12c7e869d11657
[]
no_license
vihndsm/Python
0f6bd7ab7583ff8d078d4abc4fd9053a5f65e5cf
72291e76fecca0b1a9176f77bd5f042806ec9b27
refs/heads/master
2022-12-30T00:43:12.439684
2020-10-14T06:35:16
2020-10-14T06:35:16
287,447,563
0
0
null
null
null
null
UTF-8
Python
false
false
447
py
from django.contrib.syndication.views import Feed
from django.template.defaultfilters import truncatewords
from .models import Post


class LatestPostsFeed(Feed):
    title = 'My blog'
    link = '/blog/'
    description = 'New posts of my blog.'

    def items(self):
        return Post.published.all()[:5]

    def item_title(self, item):
        return item.title

    def item_description(self, item):
        return truncatewords(item.body, 30)
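# --- Added illustration (not in the original file): how a syndication feed like
# this is typically exposed from the blog's urls.py. The URL pattern and name
# below are assumptions, not taken from this project.
#
#   from django.urls import path
#   from .feeds import LatestPostsFeed
#
#   urlpatterns = [
#       path('feed/', LatestPostsFeed(), name='post_feed'),
#   ]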
[ "67498073+vihndsm@users.noreply.github.com" ]
67498073+vihndsm@users.noreply.github.com
b13ada7f5333de200a2517f5096c67508645daba
879ac03dd910d152170d6d1e3ff4d5e522b14d79
/Algorithms/02. Implementation/014. Migratory Birds.py
d67d433a6fd67112bbc60824b76df05b4f1d4592
[]
no_license
dispe1/Hackerrank-Solutions
ae47920d7761546fd2ef753c1b4f9ae087aaed2a
67b792dc2cb2933eb1f1565100ea13b0c9783fba
refs/heads/master
2020-07-11T21:25:39.824667
2019-12-10T12:00:12
2019-12-10T12:00:12
204,646,756
4
3
null
null
null
null
UTF-8
Python
false
false
602
py
# Problem: https://www.hackerrank.com/challenges/migratory-birds/problem
# Difficulty : Easy
# Score : 10

import os
import collections
from functools import reduce


def migratoryBirds(arr):
    count = collections.Counter(arr)
    ar = list(count.items())
    ar.sort()
    result = reduce(lambda a,b: a if a[1] >= b[1] else b, ar)
    return result[0]


if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    arr_count = int(input().strip())
    arr = list(map(int, input().rstrip().split()))
    result = migratoryBirds(arr)
    fptr.write(str(result))
    fptr.close()
[ "lkjim0757@naver.com" ]
lkjim0757@naver.com
02cb07a36132dd7d9cf6132a4a9203d970779e44
b6cdef81a572e02c0cbd795a8fb6bbc74f99d627
/crypto/urls.py
ac8334a98e1eb790961dc66f3a4ca4105fcf3141
[ "MIT" ]
permissive
sodatta/Stocks-Screener
4afbdd68c1e80dafece50e3e0b967af35dd83c07
0b8da91da40b715beaf3a79163b1bdf6ea3be3b9
refs/heads/master
2023-07-27T13:14:47.798403
2021-05-03T20:04:51
2021-05-03T20:04:51
null
0
0
null
null
null
null
UTF-8
Python
false
false
264
py
from django.urls import path

from crypto.views import list_crypto_currencies, view_crypto_currency

app_name = 'crypto'

urlpatterns = [
    path('', list_crypto_currencies, name="home"),
    path('<int:pk>', view_crypto_currency, name='view_crypto_currency'),
]
[ "mohammedshokr2014@gmail.com" ]
mohammedshokr2014@gmail.com
c8be4c5e699d6d7172c3c72da43067f3395f3401
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/303/usersdata/304/85170/submittedfiles/minha_bib.py
d3fce95fc32ffe08c69a6451d3e38b683df07624
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
640
py
# -*- coding: utf-8 -*-
import time

#COMECE AQUI ABAIXO

def hello_world() :
    print('Olá mundo')
    return

def hello_world2() :
    texto = 'Olá mundo'
    return texto

def media(n1,n2) :
    m = (n1 + n2)/2.0
    return m

def multiplicacao(x,y) :
    m = (x*y)
    return m

def media(n1,n2) :
    m = (n1 + n2)/2.0
    return m

def fatorial(n) :
    f = 1
    for i in range (2, n+1, 1):
        f *= i
        print ('Estou em %d' %i)
    return f

def cronometro(s) :
    for i in range (s,-1,-1):
        print('Faltam %d segundos' %i)
        time.sleep(1)
    print('ACABOOU')
[ "rafael.mota@ufca.edu.br" ]
rafael.mota@ufca.edu.br
770c415c4e2aed55dfbfce8f6afa2f28a83f3cb2
726d8518a8c7a38b0db6ba9d4326cec172a6dde6
/0657. Robot Return to Origin/Solution.py
81f8ee2aef608a5a9f1aedd420095492bda26e3c
[]
no_license
faterazer/LeetCode
ed01ef62edbcfba60f5e88aad401bd00a48b4489
d7ba416d22becfa8f2a2ae4eee04c86617cd9332
refs/heads/master
2023-08-25T19:14:03.494255
2023-08-25T03:34:44
2023-08-25T03:34:44
128,856,315
4
0
null
null
null
null
UTF-8
Python
false
false
486
py
class Solution:
    def judgeCircle_MK1(self, moves: str) -> bool:
        x = y = 0
        for move in moves:
            if move == 'U':
                x += 1
            elif move == 'D':
                x -= 1
            elif move == 'R':
                y += 1
            else:
                y -= 1
        return x == y == 0

    def judgeCircle_MK2(self, moves: str) -> bool:
        return (moves.count('U') == moves.count('D')) and (moves.count('R') == moves.count('L'))
[ "faterazer@outlook.com" ]
faterazer@outlook.com
d500eac11046f0eebdbfcef856acfda872d27fab
83048ab1abb6941ed0b19fb5e5ff4a9d14b48e8c
/fractional_knapsack.py
1bcfcaa88935e415750cbac10ba752d5fb2affe1
[]
no_license
harshitalpha/Algorithms
ebad07cc77516ab5c35ae414462d10a38d5ef97e
2f7dcf4c3bb4390267231c7c96f7e76399c0166e
refs/heads/master
2021-07-14T17:34:02.546583
2020-06-25T06:38:39
2020-06-25T06:38:39
178,813,562
2
0
null
null
null
null
UTF-8
Python
false
false
1,976
py
'''
KNAPSACK PROBLEM
PARADIGM - GREEDY
Date : 12-Feb-2020
Name - Harshit Singhal
'''

def knapsack(profit, weights, max_weight):
    '''
    The idea to implement is that take two list of profit and weights
    we make list of index = [0,1,2,...]
    then we calculate the profit and weight ratio and store in list named ratio
    then we will sort index list according to ratio matrix
    eg : index = [0,1,2,3]
         ratio = [4,6,1,3]
    after sorting index = [1,0,3,2]
    6 is largest and index corrosponding to 6 place first
    for this we use following comand
    index.sort(key = lambda i:ratio[i], reverse = True)

    we will use 'zip' some place in code
    use of zip is that in iterable and return iterator
    >>> numbers = [1, 2, 3]
    >>> letters = ['a', 'b', 'c']
    >>> zipped = zip(numbers, letters)
    >>> zipped  # Holds an iterator object
    <zip object at 0x7fa4831153c8>
    >>> type(zipped)
    <class 'zip'>
    >>> list(zipped)
    [(1, 'a'), (2, 'b'), (3, 'c')]

    then we follow the regular approch of solving greedy problem
    '''
    print("WEIGHTS GIVEN = {}".format(weights))
    print("PROFIT GIVEN = {}".format(profit))
    print("MAX WEIGHT CAN CARRY = {}".format(max_weight))
    index = list(range(len(weights)))
    ratio = [v/w for v,w in zip(profit, weights)]
    index.sort(key = lambda i:ratio[i], reverse = True)
    ans_weights = [0] * len(weights)
    for i in index:
        if(weights[i] <= max_weight):
            ans_weights[i] = 1
            max_weight = max_weight - weights[i]
        else:
            ans_weights[i] = float(float(max_weight) / float(weights[i]))
            break

    # Total Profit
    final_profit = 0
    for i in range(len(weights)):
        final_profit = final_profit + (ans_weights[i] * profit[i])

    print("WEIGHT OF EACH OBJECT CAN CARRY = {}".format(ans_weights))
    print("FINAL PROFIT = {}".format(final_profit))

profit = [10,5,15,7,6,18,3]
weights = [2,3,5,7,1,4,1]
knapsack(profit,weights,15)
[ "harshitsinghal1103@gmail.com" ]
harshitsinghal1103@gmail.com
1373c132d750faa29f5c0215c25e9f3c8ae56242
330a8979441a9cae2c7af07e6a03080482cfb944
/src/lib/commands/catch.py
1a96b88c2fdd44b05f0a3b2c8ebe156ce3df1cb5
[]
no_license
singlerider/lorenzotherobot
025770732c3de299437a9d49a4669bcb1b2b7f32
d0cac10afd19335aad4145c99ffec5413b97a22a
refs/heads/master
2020-12-26T03:43:35.423157
2018-07-21T04:53:57
2018-07-21T04:53:57
28,792,870
26
19
null
2016-04-17T02:06:14
2015-01-05T01:52:04
Python
UTF-8
Python
false
false
1,246
py
import globals

from src.lib.queries.points_queries import *
from src.lib.queries.pokemon_queries import *


def catch(**kwargs):
    channel = kwargs.get("channel", "testchannel").lstrip("#")
    if globals.CHANNEL_INFO[channel]['caught'] is False:
        pokemon_trainer = kwargs.get("username", "testuser")
        # This is here for if the user is brand new. This creates an entry in
        # the users table, which userpokemon is dependent on
        modify_user_points(pokemon_trainer, 0)
        open_position, occupied_positions = find_open_party_positions(
            pokemon_trainer)
        desired_level = 5
        pokemon_id = get_pokemon_id_from_name(
            globals.CHANNEL_INFO[channel]['pokemon'])
        if pokemon_id is None:
            return "Pokemon not found! Check your spelling"
        if len(open_position) > 0:
            globals.CHANNEL_INFO[channel]['caught'] = True
            return insert_user_pokemon(
                pokemon_trainer, pokemon_trainer, open_position[0], pokemon_id,
                desired_level, globals.CHANNEL_INFO[channel]['pokemon'], None, None)
        else:
            return "No open slots in your party."
    else:
        return "Too slow!"
[ "haroboy876@gmail.com" ]
haroboy876@gmail.com
093d87a094d7b4d8250c0340cb4a4b8ade5abff5
4c49fdc4608a49dfacea02fba364deb295ef78dc
/backend/texty_friends_22060/urls.py
8b36763e08c9a1ef7bbc0699bd7e2bbbad8553a9
[]
no_license
crowdbotics-apps/texty-friends-22060
4b9db76082d0b536c24321d4faf6a65c04799b2b
7cb9bbc9f788844d1fd08172906cac66397f104d
refs/heads/master
2023-01-08T21:21:17.337089
2020-10-28T15:55:31
2020-10-28T15:55:31
308,067,651
0
0
null
null
null
null
UTF-8
Python
false
false
1,949
py
"""texty_friends_22060 URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include from allauth.account.views import confirm_email from rest_framework import permissions from drf_yasg.views import get_schema_view from drf_yasg import openapi urlpatterns = [ path("", include("home.urls")), path("accounts/", include("allauth.urls")), path("api/v1/", include("home.api.v1.urls")), path("admin/", admin.site.urls), path("users/", include("users.urls", namespace="users")), path("rest-auth/", include("rest_auth.urls")), # Override email confirm to use allauth's HTML view instead of rest_auth's API view path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email), path("rest-auth/registration/", include("rest_auth.registration.urls")), ] admin.site.site_header = "Texty Friends" admin.site.site_title = "Texty Friends Admin Portal" admin.site.index_title = "Texty Friends Admin" # swagger api_info = openapi.Info( title="Texty Friends API", default_version="v1", description="API documentation for Texty Friends App", ) schema_view = get_schema_view( api_info, public=True, permission_classes=(permissions.IsAuthenticated,), ) urlpatterns += [ path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs") ]
[ "team@crowdbotics.com" ]
team@crowdbotics.com
d8ca05184091d8e0b194f3a2b89226c67244e6b8
f0d713996eb095bcdc701f3fab0a8110b8541cbb
/nugyAtjRNQPTHLJNR_15.py
1fcf64ccafcc2df3b1ed06c5d4dbeb404587d3cc
[]
no_license
daniel-reich/turbo-robot
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
a7a25c63097674c0a81675eed7e6b763785f1c41
refs/heads/main
2023-03-26T01:55:14.210264
2021-03-23T16:08:01
2021-03-23T16:08:01
350,773,815
0
0
null
null
null
null
UTF-8
Python
false
false
779
py
""" Suppose that you add all of the page numbers in a book. If the total is 21, the book could only have 6 pages because 1 + 2 + 3 + 4 + 5 + 6 = 21. If the total were 25, that would be impossible because the next number in the series is 28 (21 + 7). Create a function that, given the `total` number of pages as an argument, returns `True` if it is a valid total and `False` if it is not. Can you devise a solution that is more efficient than simply adding consecutive integers as I did above? ### Examples pages_in_book(5) ➞ False pages_in_book(4005) ➞ True pages_in_book(9453) ➞ True ### Notes N/A """ def pages_in_book(total): d = 0 for i in range(1, total+1): d+=i if total == d: return True else: return False
[ "daniel.reich@danielreichs-MacBook-Pro.local" ]
daniel.reich@danielreichs-MacBook-Pro.local
9c91579e1fcf5ada284b7da45dea1444dc10b0e7
44bfafa7a3de51e089a470afaa7d37e4e1176777
/seqspy/Evap_UVMOT_MultiPulse_Image_ZEROCROSSING.py
9ca09d37b9f68c9699029d2eccfc1c2b090d3609
[]
no_license
drlightx/apparatus3-seq
b9bc4bd5d9b3a95f8610afff28ee7baea951b641
4505a2f484ecea2390482fb4ddf16ac9ca63b02d
refs/heads/master
2021-01-18T06:41:38.874121
2012-03-04T23:03:59
2012-03-04T23:03:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,256
py
"""Make sure the report file given by (L:/data/app3/Savedir)report(L:/data/app3/RunNumber).INI exists otherwise this code won't compile. """ __author__ = "Pedro M Duarte" import time t0=time.time() print "\n----- Evap_UVMOT_Image_ZEROCROSSING.py -----\n" import sys, math sys.path.append('L:/software/apparatus3/seq/utilspy') sys.path.append('L:/software/apparatus3/seq/seqspy') sys.path.append('L:/software/apparatus3/convert') import seq, wfm, gen, cnc, odt, andor, highfield_uvmot report=gen.getreport() #PARAMETERS stepsize = float(report['SEQ']['stepsize']) tof = float(report['ANDOR']['tof']) exp = float(report['ANDOR']['exp']) noatoms = float(report['ANDOR']['noatoms']) #SEQUENCE s=seq.sequence(stepsize) s=gen.initial(s) s.wait(0.0) s.digichg('hfimg',1) s.digichg('odt7595',0) #Do CNC, UVMOT, and field ramps s, toENDBFIELD = highfield_uvmot.go_to_highfield(s) # Add evaporation ramp to ODT free = float(report['EVAP']['free']) image= float(report['EVAP']['image']) buffer=10.0 #Time needed to re-latch the trigger for the AOUTS if free < buffer + toENDBFIELD : print 'Need at list ' + str(buffer) + 'ms of free evap before evaporation can be triggered' print 'Currently ramps end at %f , and free is %f' % (toENDBFIELD,free) exit(1) s.wait(free) odtpow, ENDEVAP, cpowend, ipganalog = odt.odt_evap(image) evap_ss = float(report['EVAP']['evapss']) bias = float(report['FESHBACH']['bias']) zcrampdt = float(report['ZEROCROSS']['zcrampdt']) zcdt = float(report['ZEROCROSS']['zcdt']) zcbias = float(report['ZEROCROSS']['zcbias']) bfield = wfm.wave('bfield',bias,evap_ss) #~ bfield.extend(odtpow.dt()-zcdt-zcrampdt) #~ bfield.linear(zcbias,zcrampdt) #~ bfield.extend(odtpow.dt()) bfield.extend(odtpow.dt()) bfield.linear(zcbias,zcrampdt) bfield.appendhold(zcdt) odtpow.extend(bfield.dt()) ipganalog.extend(bfield.dt()) #s.analogwfm_add(evap_ss,[odtpow,bfield]) s.analogwfm_add(evap_ss,[odtpow,bfield,ipganalog]) # ENDEVAP should be equal to image #~ s.wait(image) s.wait(image+zcdt+zcrampdt) #RELEASE FROM IR TRAP s.digichg('odtttl',0) odttof = float(report['ODT']['odttof']) s.wait(odttof) #Shine probe multiple times before taking the final picture #Test for how far detuned is the phase-contrast imaging multiN = int(report['ANDOR']['multiN']) multiDelta = float(report['ANDOR']['multiDelta']) multidt = float(report['ANDOR']['multidt']) s = andor.multiProbe(s, 'probe', multiN, multiDelta, multidt) #TAKE PICTURES light = 'probe' #light = 'motswitch' #light = 'bragg' trap_on_picture = 1 kinetics = gen.bstr('Kinetics',report) print '...kinetics = ' + str(kinetics) if kinetics == True: s,SERIESDT = andor.KineticSeries4(s,exp,light,noatoms, trap_on_picture) else: s,SERIESDT = andor.FKSeries2(s,stepsize,exp,light,noatoms, trap_on_picture) #After taking a picture sequence returns at time of the last probe strobe #Wait 30ms to get past the end s.wait(30.0) s=gen.shutdown(s) s.digichg('odtttl',0) s.digichg('odt7595',0) s.save('L:/software/apparatus3/seq/seqstxt/expseq.txt') s.clear_disk() print '...Compilation = %.2f seconds\n' % (time.time()-t0)
[ "pmd323@gmail.com" ]
pmd323@gmail.com
3a6764ab34cea0fccf1ae594504c307f26b60961
60e50b82636a7652c0c9d5e4a5fec50d49ac49ae
/src/scraping/models.py
a57819eb7ff44378535b96505f38ccac9cce5fa7
[]
no_license
SnottyJACK/scraping_service
5289922529dd8f832c8ecacfc32b66b56276d24d
de887b6b2b9c43e657c56a76038484c53a35afbd
refs/heads/master
2022-11-08T15:47:18.348350
2020-08-10T18:45:04
2020-08-10T18:45:04
285,887,020
0
0
null
null
null
null
UTF-8
Python
false
false
2,113
py
from django.db import models

from scraping.utils import from_cyrillic_to_eng

# Create your models here.


class City(models.Model):
    name = models.CharField(max_length=250, verbose_name="Название города", unique=True)
    slug = models.CharField(max_length=250, blank=True, unique=True)

    class Meta:
        verbose_name ='Название города'
        verbose_name_plural = 'Названия городов'

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = from_cyrillic_to_eng(str(self.name))
        super().save(*args, **kwargs)


class Language(models.Model):
    name = models.CharField(max_length=250, verbose_name="Язык программирования", unique=True)
    slug = models.CharField(max_length=250, blank=True, unique=True)

    class Meta:
        verbose_name = 'Язык программирования'
        verbose_name_plural = 'Языки программирования'

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = from_cyrillic_to_eng(str(self.name))
        super().save(*args, **kwargs)


class Vacancy(models.Model):
    url = models.URLField(unique=True)
    title = models.CharField(max_length=250, verbose_name='Заголовок вакансии')
    company = models.CharField(max_length=250, verbose_name='Компания')
    description = models.TextField(verbose_name='Описание вакансии')
    city = models.ForeignKey('city', on_delete=models.CASCADE, verbose_name='Город')
    language = models.ForeignKey('language', on_delete=models.CASCADE, verbose_name='Язык программирования')
    timestamp = models.DateField(auto_now_add=True)

    class Meta:
        verbose_name ='Вакансия'
        verbose_name_plural = 'Вакансии'

    def __str__(self):
        return self.title
[ "snottyjack@yandex.ru" ]
snottyjack@yandex.ru
542dec1aa09ea22906b5ff9b37e525552243c9d1
ba2a05f20454bda428f140634bc602699f164fc4
/00.SSAFY/1.first-semester/01_basic/get_data/project2.py
a55a6a9741d9670b02ec835a95d9dc587d8db3f3
[]
no_license
snowink1137/TIL
734da402e99afa52f1af4ef996a6b274b1bcce0b
9e9c78eb0c892affc88e2d46e143cef98af743fb
refs/heads/master
2023-01-08T18:26:34.311579
2021-11-14T11:04:22
2021-11-14T11:04:22
162,255,934
0
0
null
2023-01-07T11:09:09
2018-12-18T08:32:44
Jupyter Notebook
UTF-8
Python
false
false
3,098
py
import requests import datetime import copy import csv import os KOBIS_KEY = os.getenv('KOBIS_KEY') # csv 데이터 읽고 영화 code 리스트 만들기 boxoffice = open('boxoffice.csv', 'r', encoding='utf-8') reader = csv.reader(boxoffice) movie_code_list = [] for line in reader: movie_code_list.append(line[0]) del movie_code_list[0] # 영화진흥위원회 데이터 수집 ## url list 만들기 key = KOBIS_KEY weekGb = '0' url_list = [] for code in movie_code_list: url = 'http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieInfo.json?' + 'key=' + key + '&movieCd=' + code url_list.append(url) ## 상세 정보 데이터 수집 movie_data = ['영화 대표코드', '영화명(국문)', '영화명(영문)', '영화명(원문)', '개봉연도', '상영시간', '장르', '감독명', '배우1', '배우2', '배우3'] for url in url_list: response = requests.get(url) response_json = response.json() movie_data.append(response_json['movieInfoResult']['movieInfo']['movieCd']) movie_data.append(response_json['movieInfoResult']['movieInfo']['movieNm']) movie_data.append(response_json['movieInfoResult']['movieInfo']['movieNmEn']) movie_data.append(response_json['movieInfoResult']['movieInfo']['movieNmOg']) movie_data.append(response_json['movieInfoResult']['movieInfo']['prdtYear']) movie_data.append(response_json['movieInfoResult']['movieInfo']['showTm']) movie_data.append(response_json['movieInfoResult']['movieInfo']['genres'][0]['genreNm']) movie_data.append(response_json['movieInfoResult']['movieInfo']['directors'][0]['peopleNm']) if len(response_json['movieInfoResult']['movieInfo']['actors']) > 2: movie_data.append(response_json['movieInfoResult']['movieInfo']['actors'][0]['peopleNm']) movie_data.append(response_json['movieInfoResult']['movieInfo']['actors'][1]['peopleNm']) movie_data.append(response_json['movieInfoResult']['movieInfo']['actors'][2]['peopleNm']) elif len(response_json['movieInfoResult']['movieInfo']['actors']) == 2: movie_data.append(response_json['movieInfoResult']['movieInfo']['actors'][0]['peopleNm']) movie_data.append(response_json['movieInfoResult']['movieInfo']['actors'][1]['peopleNm']) movie_data.append('') elif len(response_json['movieInfoResult']['movieInfo']['actors']) == 1: movie_data.append(response_json['movieInfoResult']['movieInfo']['actors'][0]['peopleNm']) movie_data.append('') movie_data.append('') elif len(response_json['movieInfoResult']['movieInfo']['actors']) == 0: movie_data.append('') movie_data.append('') movie_data.append('') f = open('movie.csv', 'a+', encoding='utf-8', newline='') for i in range(44): writer = csv.writer(f) writer.writerow( [movie_data[11*i + 0], movie_data[11*i + 1], movie_data[11*i + 2], movie_data[11*i + 3], movie_data[11*i + 4], movie_data[11*i + 5], movie_data[11*i + 6], movie_data[11*i + 7], movie_data[11*i + 8], movie_data[11*i + 9], movie_data[11*i + 10]] ) f.close()
[ "snowink1137@gmail.com" ]
snowink1137@gmail.com
1bb0502f3504e1ee2481c92a2c7b19b89e50a5d0
7fbf91c595f3adb67e29ab879a0b215581d260bf
/scrapy爬虫/京东-m/JDSpider/items.py
c4dd966451147a80e44f3bfaf4b1609f9b3c996d
[]
no_license
Randyedu/python
69947b3836e62d0081d92591ae2acd9a54eadb9a
5f9e7bec295ae05eadde0f661e7039c2bd08f725
refs/heads/master
2021-04-26T22:20:22.555128
2018-03-02T07:01:27
2018-03-02T07:01:27
124,074,741
1
0
null
2018-03-06T12:23:42
2018-03-06T12:23:42
null
UTF-8
Python
false
false
3,741
py
# -*- coding: utf-8 -*- # Define here the models for your scraped items # # See documentation in: # http://doc.scrapy.org/en/latest/topics/items.html from scrapy import Item, Field class CategoriesItem(Item): name = Field() #分类名称 url = Field() #分类url _id = Field() #分类id index = Field() #分类的index class ProductsItem(Item): name = Field() #产品名称 url = Field() #产品url _id = Field() #产品id category = Field() #产品分类 reallyPrice = Field() #产品价格 originalPrice = Field() #原价 description = Field() #产品描述 shopId = Field() #shop id venderId = Field() #vender id commentCount = Field() #评价总数 goodComment = Field() #好评数 generalComment = Field() #中评数 poolComment = Field() #差评数 favourableDesc1 = Field() #优惠描述1 favourableDesc2 = Field() #优惠描述2 class ShopItem(Item): _id = Field() #店铺名称 name = Field() #店铺名称 url1 = Field() #店铺url1 url2 = Field() #店铺url2 shopId = Field() #shop id venderId = Field() #vender id class CommentItem(Item): _id = Field() productId = Field() #同ProductsItem的id相同 guid = Field() content = Field() creationTime = Field() isTop = Field() referenceId = Field() referenceName = Field() referenceType = Field() referenceTypeId = Field() firstCategory = Field() secondCategory = Field() thirdCategory = Field() replyCount = Field() score = Field() status = Field() title = Field() usefulVoteCount = Field() uselessVoteCount = Field() userImage = Field() userImageUrl = Field() userLevelId = Field() userProvince = Field() viewCount = Field() orderId = Field() isReplyGrade = Field() nickname = Field() userClient = Field() mergeOrderStatus = Field() discussionId = Field() productColor = Field() productSize = Field() imageCount = Field() integral = Field() userImgFlag = Field() anonymousFlag = Field() userLevelName = Field() plusAvailable = Field() recommend = Field() userLevelColor = Field() userClientShow = Field() isMobile = Field() days = Field() afterDays = Field() class CommentImageItem(Item): _id = Field() associateId = Field() #和CommentItem的discussionId相同 productId = Field() #不是ProductsItem的id,这个值为0 imgUrl = Field() available = Field() pin = Field() dealt = Field() imgTitle = Field() isMain = Field() class CommentSummaryItem(Item): _id = Field() goodRateShow = Field() poorRateShow = Field() poorCountStr = Field() averageScore = Field() generalCountStr = Field() showCount = Field() showCountStr = Field() goodCount = Field() generalRate = Field() generalCount = Field() skuId = Field() goodCountStr = Field() poorRate = Field() afterCount = Field() goodRateStyle = Field() poorCount = Field() skuIds = Field() poorRateStyle = Field() generalRateStyle = Field() commentCountStr = Field() commentCount = Field() productId = Field() #同ProductsItem的id相同 afterCountStr = Field() goodRate = Field() generalRateShow = Field() jwotestProduct = Field() maxPage = Field() score = Field() soType = Field() imageListCount = Field() class HotCommentTagItem(Item): _id = Field() name = Field() status = Field() rid = Field() productId = Field() count = Field() created = Field() modified = Field() type = Field() canBeFiltered = Field()
[ "954950195@qq.com" ]
954950195@qq.com
6829ee431be6466d0b34b15755cf9dcbdc3bf445
552d470963e23741762f2c18143557596f9f433f
/catalog/admin.py
cbee91a1712e5fd628da2d19eeac1e506ec0f707
[]
no_license
WilliamPerezBeltran/django_practice1
ccab7dac5593b34cebd09b1b6a9576cc9030c228
e5735e1009faad448c49f61c42afb2fc0db9e48e
refs/heads/master
2020-04-08T13:57:22.004619
2018-11-28T00:30:58
2018-11-28T00:30:58
159,415,584
0
0
null
null
null
null
UTF-8
Python
false
false
1,331
py
from django.contrib import admin
from catalog.models import Author, Genre, Book, BookInstance

# admin.site.register(Book)
#admin.site.register(Author)
admin.site.register(Genre)
# admin.site.register(BookInstance)


# Define the admin class
class AuthorAdmin(admin.ModelAdmin):
    list_display = ('last_name', 'first_name', 'date_of_birth', 'date_of_death')
    fields = ['first_name', 'last_name', ('date_of_birth', 'date_of_death')]

# Register the admin class with the associated model
admin.site.register(Author, AuthorAdmin)


class BooksInstanceInline(admin.TabularInline):
    model = BookInstance

# @register decorator to register the models (this does exactly
# the same thing as the admin.site.register() syntax)
# @register =admin.site.register()

# Register the Admin classes for Book using the decorator
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    list_display = ('title', 'author', 'display_genre')
    inlines = [BooksInstanceInline]

# Register the Admin classes for BookInstance using the decorator
@admin.register(BookInstance)
class BookInstanceAdmin(admin.ModelAdmin):
    list_filter = ('status', 'due_back')
    fieldsets = (
        (None, {
            'fields': ('book', 'imprint', 'id')
        }),
        ('Availability', {
            'fields': ('status', 'due_back')
        }),
    )
[ "williampbeltranprogramador@gmail.com" ]
williampbeltranprogramador@gmail.com
a14bdfeb905147643da75e344e16bda463ba04de
09c39de5aad7b283cfac2f09a2b93e43086846d2
/Unit 10 Advanced Topics in Python/02 Introduction to Bitwise Operators/The Bitwise Operators/6-Slide to the Left! Slide to the Right!.py
4be1e2a75a05f98a77cf829b748bf1d8d2ca9dd2
[ "MIT" ]
permissive
lpython2006e/python-samples
b4e84080259faf75b41fb2fd4fb9d2fbc9f857aa
b94ba67ce0d7798ecf796dadae206aa75da58301
refs/heads/master
2023-01-21T13:16:13.295163
2020-11-29T11:01:50
2020-11-29T11:01:50
278,653,779
1
0
null
null
null
null
UTF-8
Python
false
false
164
py
shift_right = 0b1100
shift_left = 0b1

# Your code here!
shift_right = shift_right >> 2
shift_left = shift_left << 2

print(bin(shift_right))
print(bin(shift_left))
[ "lent@hivetech.vn" ]
lent@hivetech.vn
1e28fd5b3d89457860ba8f712835fb26b59c0ce9
fb4e41a40d82427e3948549653cdf0405e6dba2b
/app/main/routes.py
275d16634e7603b9970b08849ae8afe2c331cd8a
[]
no_license
Axeh99/Axeh99s-Website
5afeef75943722a7c3e67554da57daeb01c70f7a
7704fbaae7aa26bb1c208fbc18bb6679ea9e3215
refs/heads/master
2022-12-10T21:37:16.296575
2020-09-01T16:12:27
2020-09-01T16:12:27
283,016,745
1
0
null
2021-03-04T20:03:07
2020-07-27T21:07:14
HTML
UTF-8
Python
false
false
455
py
from flask import Blueprint, render_template, request
from app.models import Post

main = Blueprint("main", __name__)


@main.route("/")
@main.route("/home")
def home():
    page = request.args.get("page", 1, type=int)
    posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=5)
    return render_template("home.html", posts=posts)


@main.route("/about")
def about():
    return render_template("about.html", title="About")
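# --- Added illustration (not in the original file): how a blueprint like this is
# normally registered on the application object. This is a hedged sketch; the
# project's real application factory and config are not shown in this record.
#
#   from flask import Flask
#   from app.main.routes import main
#
#   app = Flask(__name__)
#   app.register_blueprint(main)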
[ "sralloza@gmail.com" ]
sralloza@gmail.com
07b01473d13eb88ada3a05a4badbc11557620cc4
b0a274023658af5202b34772836a17876e2429c4
/ballet/eng/ts.py
901571acacbded4a2a29188533e0c7399998ae6d
[ "MIT" ]
permissive
pvk-developer/ballet
a785a89c5fabf4eac74a732dd81c600b5a4a3761
1b720790aff072d1b004c7df0f70860bf4947204
refs/heads/master
2021-02-12T17:04:00.317030
2020-03-02T05:20:30
2020-03-02T05:20:30
null
0
0
null
null
null
null
UTF-8
Python
false
false
978
py
from sklearn.pipeline import FeatureUnion

from ballet.eng.base import GroupedFunctionTransformer

__all__ = ['SingleLagger', 'make_multi_lagger']


class SingleLagger(GroupedFunctionTransformer):
    """Transformer that applies a lag operator to each group

    Args:
        lag (int): lag to apply
        groupby_kwargs (dict): keyword arguments to pd.DataFrame.groupby
    """

    def __init__(self, lag, groupby_kwargs=None):
        super().__init__(lambda x: x.shift(lag), groupby_kwargs=groupby_kwargs)


def make_multi_lagger(lags, groupby_kwargs=None):
    """Return a union of transformers that apply different lags

    Args:
        lags (Collection[int]): collection of lags to apply
        groupby_kwargs (dict): keyword arguments to pd.DataFrame.groupby
    """
    laggers = [SingleLagger(l, groupby_kwargs=groupby_kwargs) for l in lags]
    feature_union = FeatureUnion([
        (repr(lagger), lagger) for lagger in laggers
    ])
    return feature_union
[ "micahjsmith@gmail.com" ]
micahjsmith@gmail.com
3850bc892d13df29ec269ea05acd45528302e442
a14dd601cde67f67d0ba38dfd1362f7c0109cef1
/recursion/leetcode/word-break/count.py
56dba130f8e99755805eb6a3116f554bd9cee700
[]
no_license
Meaha7/dsa
d5ea1615f05dae32671af1f1c112f0c759056473
fa80219ff8a6f4429fcf104310f4169d007af712
refs/heads/main
2023-09-03T18:52:41.950294
2021-11-05T09:14:42
2021-11-05T09:14:42
null
0
0
null
null
null
null
UTF-8
Python
false
false
892
py
# T=n²+2ⁿ+w,S=2ⁿnw
def x(s, words):
    def dfs(s):
        if not s:
            return 1
        count = 0
        for word in words:
            if s.startswith(word):
                count += dfs(s[len(word):])
        return count
    return dfs(s)


# T=n²+2ⁿ+w,S=2ⁿnw
def y(s, words):
    words = set(words)

    def dfs(s):
        if not s:
            return 1
        count = 0
        for i in range(len(s)):
            prefix, suffix = s[:i + 1], s[i + 1:]
            if prefix in words:
                count += dfs(suffix)
        return count
    return dfs(s)


for s, words in [
    ('nikhil', ['nikhil']),
    ('catsanddog', ['cat', 'cats', 'and', 'sand', 'dog']),
    ('pineapplepenapple', ['apple', 'pen', 'applepen', 'pine', 'pineapple']),
    ('catsandog', ['cats', 'dog', 'sand', 'and', 'cat']),
]:
    print(x(s, words), end=' ')
    print(y(s, words))
[ "nikhilgoyal104ah4@gmail.com" ]
nikhilgoyal104ah4@gmail.com
30bdce0bca57d062eace71dc5e23763d2ce913df
de3413c3af4ac0a76d817a7e624d8d2e08379003
/svm/svm_author_id.py
4581a12bca152e5b98a1c9173154dcebc7ef9243
[]
no_license
victorlifan/ud120-projects_mechine_learning_ex
8aded0e48955e1970ab501567317e660fdce97e9
39834cf8607dd448db332937953bf0a8a7303832
refs/heads/master
2022-07-10T15:03:04.212606
2020-04-14T02:02:22
2020-04-14T02:02:22
250,891,814
0
0
null
2022-06-22T01:35:58
2020-03-28T20:56:13
Jupyter Notebook
UTF-8
Python
false
false
1,319
py
#!/usr/bin/python

"""
    This is the code to accompany the Lesson 2 (SVM) mini-project.

    Use a SVM to identify emails from the Enron corpus by their authors:
    Sara has label 0
    Chris has label 1
"""

import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess


### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()

### limit training data to improve training time
#features_train = features_train[:len(features_train)/100]
#labels_train = labels_train[:len(labels_train)/100]

#########################################################
### your code goes here ###
from sklearn.svm import SVC

#clf= SVC(kernel='linear')
clf = SVC(kernel = 'rbf', C= 10000)

t0= time()
clf.fit(features_train,labels_train)
print('training time:', round(time()-t0, 3),'s')

t0= time()
pre = clf.predict(features_test)
print("there are {} emails are predicted to be in 'Chris' and {} in 'Sara'".format(sum(pre), pre.shape[0]-sum(pre)))
print('prediction time:', round(time()-t0,3),'s')

print(clf.score(features_test,labels_test))
#########################################################
[ "victorlifan@vip.qq.com" ]
victorlifan@vip.qq.com
a3dca879c750c6f8833c50913a50f39b7afd6b2c
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02873/s049606753.py
6b59a11dc7ecdf3b53103e6b4936eef9860697ed
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
557
py
s=input() sl=len(s) a=[] count=1 for i in range(sl-1): if s[i+1]==s[i]: count+=1 else: a.append(count) count=1 a.append(count) ans=0 al=len(a) if s[0]=="<": for i in range(0,al-1,2): m,n=max(a[i],a[i+1]),min(a[i],a[i+1]) ans+=(m*(m+1)+n*(n-1))/2 if al%2==1: ans+=a[-1]*(a[-1]+1)/2 elif s[0]==">": ans+=a[0]*(a[0]+1)/2 for i in range(1,al-1,2): m,n=max(a[i],a[i+1]),min(a[i],a[i+1]) ans+=(m*(m+1)+n*(n-1))/2 if al%2==0: ans+=a[-1]*(a[-1]+1)/2 print(int(ans))
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
f7f7789d0731133f29e78799dc2f7183155bde34
8881a4927d893e1e755c0488f76ba7941b379f26
/emp_mgmt_sys/poll/migrations/0002_choice.py
f4b08e4cb314699a645b68f87618ecbf13704cf9
[]
no_license
SatishNitk/Django
6bb839fcf2bc7d70413e3d56ac98124a7a96a5de
d9260c032322a34410d783c39a8f13e8f63b8be4
refs/heads/master
2020-05-24T23:01:35.767388
2019-07-06T13:56:50
2019-07-06T13:56:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
740
py
# Generated by Django 2.0.1 on 2019-05-26 13:24

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('poll', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(blank=True, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                # on_delete must be a callable such as models.CASCADE, not the string 'CASECADE'
                ('question', models.ForeignKey(on_delete=models.CASCADE, to='poll.Question')),
            ],
        ),
    ]
[ "satishkrgu95@gmail.com" ]
satishkrgu95@gmail.com
439a7935a465973070be43a439890fa28f94e6da
42b9bafc3c757543328d93fb60269ad4255aae17
/env/lib/python3.7/site-packages/thefuck/rules/git_stash_pop.py
0e143ffd813b34c0326ae221f2e00e2d36363417
[ "MIT" ]
permissive
mejeng/kasir
4fe66d1828e72b64d770426d71185cdd3c54127e
cc6f9158b61c0cb45078ddf798af9588c8771311
refs/heads/master
2020-09-25T03:36:10.144439
2019-11-30T07:59:23
2019-11-30T07:59:23
225,908,795
2
0
MIT
2019-12-04T16:21:15
2019-12-04T16:21:15
null
UTF-8
Python
false
false
485
py
from thefuck.shells import shell from thefuck.specific.git import git_support @git_support def match(command): return ('stash' in command.script and 'pop' in command.script and 'Your local changes to the following files would be overwritten by merge' in command.output) @git_support def get_new_command(command): return shell.and_('git add --update', 'git stash pop', 'git reset .') # make it come before the other applicable rules priority = 900
[ "slashsdull@gmail.com" ]
slashsdull@gmail.com
c0ad9538c01db6eed246cc86917332e8c4e02e0d
ad59fb12042bfd3f5c43eca057d0f747f9e148cf
/Se2iP/usr/lib/enigma2/python/Plugins/Extensions/IPTVPlayer/tsiplayer/addons/resources/hosters/vk.py
3edd8b9d6e4cf32052f7ca686c076546d5635d52
[]
no_license
lexlong2007/eePlugins
d62b787100a7069ad5713a47c5688008063b45ec
167b262fe36901a2d3a2fae6d0f85e2307b3eff7
refs/heads/master
2022-03-09T05:37:37.567937
2022-02-27T01:44:25
2022-02-27T01:44:25
253,012,126
0
0
null
2020-04-04T14:03:29
2020-04-04T14:03:29
null
UTF-8
Python
false
false
3,404
py
#-*- coding: utf-8 -*- # https://github.com/Kodi-vStream/venom-xbmc-addons from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.requestHandler import cRequestHandler from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.parser import cParser from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.hosters.hoster import iHoster from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.comaddon import xbmcgui class cHoster(iHoster): def __init__(self): self.__sDisplayName = 'Vk' self.__sFileName = self.__sDisplayName self.__sHD = '' def getDisplayName(self): return self.__sDisplayName def setDisplayName(self, sDisplayName): self.__sDisplayName = sDisplayName + ' [COLOR skyblue]' + self.__sDisplayName + '[/COLOR] [COLOR khaki]' + self.__sHD + '[/COLOR]' def setFileName(self, sFileName): self.__sFileName = sFileName def getFileName(self): return self.__sFileName def setHD(self, sHD): if 'hd' in sHD: self.__sHD = 'HD' else: self.__sHD = '' def getHD(self): return self.__sHD def getPluginIdentifier(self): return 'vk' def isDownloadable(self): return True def isJDownloaderable(self): return True def getPattern(self): return '' def __getIdFromUrl(self): sPattern = "?([^<]+)" oParser = cParser() aResult = oParser.parse(self.__sUrl, sPattern) if (aResult[0] == True): return aResult[1][0] return '' def __modifyUrl(self, sUrl): if (sUrl.startswith('http://')): oRequestHandler = cRequestHandler(sUrl) oRequestHandler.request() sRealUrl = oRequestHandler.getRealUrl() self.__sUrl = sRealUrl return self.__getIdFromUrl() return sUrl def __getKey(self): oRequestHandler = cRequestHandler(self.__sUrl) sHtmlContent = oRequestHandler.request() sPattern = 'fkzd="(.+?)";' oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): aResult = aResult[1][0].replace('.', '%2E') return aResult return '' def setUrl(self, sUrl): self.__sUrl = sUrl def checkUrl(self, sUrl): return True def getUrl(self): return self.__sUrl def getMediaLink(self): return self.__getMediaLinkForGuest() def __getMediaLinkForGuest(self): url=[] qua=[] oRequest = cRequestHandler(self.__sUrl) sHtmlContent = oRequest.request() sPattern = '"url.+?":"(.+?)\.(\d+).mp4' oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): for aEntry in aResult[1]: url.append(aEntry[0]) qua.append(str(aEntry[1])) dialog2 = xbmcgui.Dialog() ret = dialog2.select('Select Quality', qua) #sUrl = url[ret] + '.' + qua[ret] + '.mp4' api_call = ('%s.%s.mp4') % (url[ret], qua[ret]) if api_call: return True, api_call return False, False
[ "zdzislaw22@windowslive.com" ]
zdzislaw22@windowslive.com
1b1c9a876ce91e05fab954a1120d2c8366fe8202
ec291572e354d0718a42929b84f831363cdbeb4b
/djlib/cron_utils.py
995ed62361da660010f623b0385101d5e64505b0
[]
no_license
Ishayahu/Mizva-CMS
4aaffe01093ca807a5cf2fdec45a3e5213938940
574cd5363132ea19772221c4a4b27415dbf17814
refs/heads/master
2021-01-10T20:26:13.876744
2013-11-26T10:47:02
2013-11-26T10:47:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,970
py
# -*- coding:utf-8 -*-
# coding=<utf8>
import datetime


def decronize(fstring):
    minute,hour,day,month,wday = fstring.split('\t')
    def get_interval(fstr,min,max):
        # max - upper limit of the field's range:
        # minute:       * or 0-59
        # hour:         * or 0-23
        # day of month: * or 1-31
        # month:        *, 1-12 or a month name (see below)
        # day of week:  *, 0-7 or a day name (Sunday is both 0 and 7)
        max+=1
        # wildcard field
        if fstr[0]=='*':
            # a bare * means the whole range, e.g. 0 to 59
            if len(fstr)==1:
                return list(range(0,max))
            # wildcard with a step, e.g. */2 means every second value
            elif len(fstr)>1:
                return list(range(0,max,int(fstr[2:])))
        # comma-separated list of values
        elif ',' in fstr:
            return list(map(int,fstr.split(',')))
        # a-b range
        elif '-' in fstr:
            # range with a step, e.g. 2-15/2
            if '/' in fstr:
                interval = int(fstr.split('/')[1])
                start,end = list(map(int,fstr.split('/')[0].split('-')))
                return list(range(start,end,interval))
            # plain range, e.g. 2-15
            else:
                start,end = list(map(int,fstr.split('-')))
                return list(range(start,end))
        else:
            return (int(fstr),)

    minute = get_interval(minute,0,59)
    hour = get_interval(hour,0,23)
    month = get_interval(month,1,12)

    if (day !='*' and wday !='*') or (day =='*' and wday =='*'):
        day = get_interval(day,1,31)
        wday = get_interval(wday,0,6)
    elif day=='*' and wday != '*':
        day = list()
        wday = get_interval(wday,0,6)
    elif day != '*' and wday=='*':
        day = get_interval(day,1,31)
        wday = list()
    return {'minute':minute,'hour':hour,'day':day,'month':month,'wday':wday}


def crontab_to_russian(fstr):
    result = u'В {hour[0]} часов {minute[0]} минут каждый {day} день месяца или каждый {wday} день недели в месяцах {month}'.format(**decronize(fstr))
    return result


def generate_next_reminder(ranges, stop_date):
    minute = datetime.datetime.now().minute
    hour = datetime.datetime.now().hour
    day = datetime.datetime.now().day
    month = datetime.datetime.now().month
    wday = datetime.datetime.now().weekday()
    year = datetime.datetime.now().year
    crit_dict = {'month':month,'day':day,'hour':hour,'minute':minute,'wday':wday}
    crit_max = {'month':13,'day':32,'hour':24,'minute':60,'wday':7}
    crit_min = {'month':1,'day':1,'hour':0,'minute':0,'wday':0}
    to_next = False
    for criteria in ('minute','hour','day','month'):
        if criteria != 'day':
            # if criteria == 'month':
            #     print crit_dict
            #     print to_next
            if to_next:
                crit_dict[criteria] += 1
                to_next = False
            if crit_dict[criteria] == crit_max[criteria]:
                crit_dict[criteria] = crit_min[criteria]
                to_next = True
            while True: #crit_dict[criteria] <= crit_max[criteria]:
                if crit_dict[criteria] in ranges[criteria]:
                    break
                crit_dict[criteria] +=1
                if crit_dict[criteria] >= crit_max[criteria]:
                    crit_dict[criteria] = crit_min[criteria]
                    to_next = True
        else:
            if to_next:
                #print 'here'
                crit_dict['day'] += 1
                crit_dict['wday'] += 1
                if crit_dict['wday'] == 7:
                    crit_dict['wday'] = 1
                to_next = False
            while True: # crit_dict['day'] <= crit_max['day'] and crit_dict['wday'] <= crit_max['wday']:
                #print crit_dict
                if crit_dict['day'] in ranges['day'] or crit_dict['wday'] in ranges['wday']:
                    break
                crit_dict['day'] += 1
                crit_dict['wday'] += 1
                if crit_dict['day'] >= crit_max['day']:
                    crit_dict['day'] = crit_min['day']
                    to_next = True
                if crit_dict['wday'] >= crit_max['wday']:
                    crit_dict['wday'] = crit_min['wday']
                    # to_next = True
    if to_next:
        year += 1
    next_reminder = datetime.datetime(year,crit_dict['month'],crit_dict['day'],crit_dict['hour'],crit_dict['minute'])
    # return crit_dict['minute'],crit_dict['hour'],crit_dict['day'],crit_dict['month'],crit_dict['wday']
    if stop_date and next_reminder > stop_date:
        return False
    return next_reminder
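A small worked example of decronize, not part of the original module; the crontab line is made up and must be tab-separated, as the split('\t') above expects.

# Hypothetical example: every 15 minutes, hours 9-17, any day of month, Monday-Friday.
line = '*/15\t9-17\t*\t*\t1-5'
print(decronize(line))
# -> minute [0, 15, 30, 45], hour [9..16], day [] (wildcard), month [1..12], wday [1..4]
# Note that the a-b form parses as range(a, b), so the upper bound is exclusive.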
[ "ishayahu@mail.ru" ]
ishayahu@mail.ru
594ef675881837a25b2f2cde141c227cf3093caf
25f47c750a150727826cbf873a0ac50eb9e97305
/tests/client/osio_rest_client_test.py
2a50e09091210e18a41090adf8b1b4493f71d266
[ "MIT" ]
permissive
tonywaite/scs_host_rpi
7d06a41c525202663f73133fbfaada4384e97693
5bd09bb9b4410cb47782e2bfab0dd2bbab365b3a
refs/heads/master
2022-12-05T02:51:23.882694
2020-08-19T14:26:53
2020-08-19T14:26:53
null
0
0
null
null
null
null
UTF-8
Python
false
false
702
py
#!/usr/bin/env python3 """ Created on 9 Nov 2016 @author: Bruno Beloff (bruno.beloff@southcoastscience.com) """ from scs_core.client.http_client import HTTPClient from scs_core.osio.client.rest_client import RESTClient # -------------------------------------------------------------------------------------------------------------------- api_key = "43308b72-ad41-4555-b075-b4245c1971db" path = "/v1/orgs/south-coast-science-dev/topics" # -------------------------------------------------------------------------------------------------------------------- rest_client = RESTClient(HTTPClient(False), api_key) rest_client.connect() print(rest_client) data = rest_client.get(path) print(data)
[ "bruno.beloff@southcoastscience.com" ]
bruno.beloff@southcoastscience.com
71ac26b37b14e37540c8c6747bb6ed2674c72de1
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
/Gauss_v45r8/Gen/DecFiles/options/12103121.py
aff6fabbfe181ee752fdebddb4817168e9cada39
[]
no_license
Sally27/backup_cmtuser_full
34782102ed23c6335c48650a6eaa901137355d00
8924bebb935b96d438ce85b384cfc132d9af90f6
refs/heads/master
2020-05-21T09:27:04.370765
2018-12-12T14:41:07
2018-12-12T14:41:07
185,989,173
0
0
null
null
null
null
UTF-8
Python
false
false
1,753
py
# file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/12103121.py generated: Fri, 27 Mar 2015 15:48:08 # # Event Type: 12103121 # # ASCII decay Descriptor: [B+ -> K+ (KS0 -> pi+ pi-)]cc # from Configurables import Generation Generation().EventType = 12103121 Generation().SampleGenerationTool = "SignalRepeatedHadronization" from Configurables import SignalRepeatedHadronization Generation().addTool( SignalRepeatedHadronization ) Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction" from Configurables import ToolSvc from Configurables import EvtGenDecay ToolSvc().addTool( EvtGenDecay ) ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_KsK=DecProdCut.dec" Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb" Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ] # Ad-hoc particle gun code from Configurables import ParticleGun pgun = ParticleGun("ParticleGun") pgun.SignalPdgCode = 521 pgun.DecayTool = "EvtGenDecay" pgun.GenCutTool = "DaughtersInLHCb" from Configurables import FlatNParticles pgun.NumberOfParticlesTool = "FlatNParticles" pgun.addTool( FlatNParticles , name = "FlatNParticles" ) from Configurables import MomentumSpectrum pgun.ParticleGunTool = "MomentumSpectrum" pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" ) pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ] pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root" pgun.MomentumSpectrum.BinningVariables = "pteta" pgun.MomentumSpectrum.HistogramPath = "h_pteta" from Configurables import BeamSpotSmearVertex pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex") pgun.VertexSmearingTool = "BeamSpotSmearVertex" pgun.EventType = 12103121
[ "slavomirastefkova@b2pcx39016.desy.de" ]
slavomirastefkova@b2pcx39016.desy.de
0f0384d38e52d268984da24a31de51dd057df061
98c6ea9c884152e8340605a706efefbea6170be5
/examples/data/Assignment_4/crvcam001/boxes.py
48a26b479879bf751284a115ee42601517ff5ba7
[]
no_license
MrHamdulay/csc3-capstone
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
refs/heads/master
2021-03-12T21:55:57.781339
2014-09-22T02:22:22
2014-09-22T02:22:22
22,372,174
0
0
null
null
null
null
UTF-8
Python
false
false
653
py
def print_square(): print('*' * 5) for i in range(3): print('*' , " "*3 , '*' , sep="") print('*' * 5) def print_rectangle(width, height): print('*' * width) gap = " " for i in range(height -2): print('*' , gap * (width-2) , '*', sep="") print('*' * width) def get_rectangle (width, height): new_line='\n' star='*' figure="" top_line=(star*width)+new_line bottom_line=(star*width) gap=" " middle="" for i in range(height-2): line=star+(gap*(width-2))+star+new_line middle+=line figure=top_line + middle + bottom_line return figure
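A short usage sketch, not part of the original exercise file; the sizes are arbitrary.

# Hypothetical usage of the three box helpers above.
print_square()               # 5x5 hollow square
print_rectangle(7, 4)        # 7 wide, 4 tall
print(get_rectangle(4, 3))   # same hollow shape, returned as a string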
[ "jarr2000@gmail.com" ]
jarr2000@gmail.com
a8e5ad6c2627b4a7e10d03f0efbd7d90534a7b5a
445e58746e9ca9b61a2d1a6ede82b7badec059ee
/scripts/download.py
361335c8c1356c0c4c1d4354c59ba3909be583a7
[ "MIT" ]
permissive
agrc/electrofishing-query
cdbc18eec74d4af1c478309a518f898a4db51e92
21934ef11622139ba55f26c30dee17c1d5ac6688
refs/heads/main
2023-08-19T04:51:21.612552
2023-07-04T16:43:02
2023-07-04T21:35:15
181,779,317
0
0
MIT
2023-09-01T02:45:33
2019-04-16T22:55:17
JavaScript
UTF-8
Python
false
false
7,795
py
#!/usr/bin/env python # * coding: utf8 * ''' download.py A script that downloads CSVs of electrofishing data for a given set of sampling event ids. ''' import csv import sys from glob import glob from os import sep, walk from os.path import basename, dirname, join, normpath, realpath from zipfile import ZIP_DEFLATED, ZipFile import arcpy import pyodbc import swq_secrets as secrets def zip_fgdb(path, zip): path = normpath(path) for (dirpath, dirnames, filenames) in walk(path): for file in filenames: # Ignore .lock files if not file.endswith('.lock'): try: zip.write( join(dirpath, file), join(basename(path), join(dirpath, file)[len(path) + len(sep):])) except Exception as e: arcpy.AddWarning( 'error zipping file geodatabase: {}'.format(e)) return None cardinality_lookup = { 'OneToOne': 'ONE_TO_ONE', 'OneToMany': 'ONE_TO_MANY' } def main(ids, type): #: ids: string #: type: string (csv or fgdb) #: returns a path to the zip file ids = ids.split(';') arcpy.AddMessage('ids: {}'.format(ids)) formatted_ids = '\'{}\''.format('\',\''.join(ids)) current_folder = dirname(realpath(__file__)) sql_directory = join(current_folder, 'sql') zip_file_path = join(arcpy.env.scratchFolder, 'data.zip') sde_file_name = 'DATABASE.sde' sde = join(current_folder, sde_file_name) arcpy.AddMessage('scratch folder: ' + arcpy.env.scratchFolder) if not arcpy.Exists(sde): arcpy.management.CreateDatabaseConnection( current_folder, sde_file_name, 'SQL_SERVER', secrets.SERVER, account_authentication='DATABASE_AUTH', username=secrets.USERNAME, password=secrets.PASSWORD, database=secrets.DATABASE) connection = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};' + 'SERVER={};DATABASE={};UID={};PWD={}'.format( secrets.SERVER, secrets.DATABASE, secrets.USERNAME, secrets.PASSWORD)) cursor = connection.cursor() with ZipFile(zip_file_path, 'w', ZIP_DEFLATED) as zip_file: if type == 'fgdb': #: fgdb arcpy.AddMessage('creating file geodatabase') fgdb = join(arcpy.env.scratchFolder, 'data.gdb') if arcpy.Exists(fgdb): arcpy.management.Delete(fgdb) arcpy.management.CreateFileGDB(dirname(fgdb), basename(fgdb)) arcpy.AddMessage('copying sampling events feature class') events_where = 'EVENT_ID IN ({})'.format(formatted_ids) events_layer = arcpy.management.MakeFeatureLayer(join(sde, 'SamplingEvents'), 'events_layer', events_where) arcpy.management.CopyFeatures(events_layer, join(fgdb, 'SamplingEvents')) arcpy.AddMessage('copying stations feature class') stations_where = 'STATION_ID IN (SELECT STATION_ID FROM {}.WILDADMIN.SamplingEvents_evw where {})'.format( secrets.DATABASE, events_where) stations_layer = arcpy.management.MakeFeatureLayer(join(sde, 'Stations'), 'stations_layer', stations_where) arcpy.management.CopyFeatures(stations_layer, join(fgdb, 'Stations')) arcpy.AddMessage('copying streams feature class') stations_where = 'Permanent_Identifier IN (SELECT WATER_ID FROM {}.WILDADMIN.Stations_evw where {})'.format( secrets.DATABASE, stations_where) streams_layer = arcpy.management.MakeFeatureLayer(join(sde, 'UDWRStreams'), 'streams_layer', stations_where) arcpy.management.CopyFeatures(streams_layer, join(fgdb, 'UDWRStreams')) arcpy.AddMessage('copying lakes feature class') stations_where = 'Permanent_Identifier IN (SELECT WATER_ID FROM {}.WILDADMIN.Stations_evw where {})'.format( secrets.DATABASE, stations_where) lakes_layer = arcpy.management.MakeFeatureLayer(join(sde, 'UDWRLakes'), 'lakes_layer', stations_where) arcpy.management.CopyFeatures(lakes_layer, join(fgdb, 'UDWRLakes')) def copy_related_tables(dataset): relationship_classes 
= arcpy.Describe(join(sde, dataset)).relationshipClassNames for relationship_class in relationship_classes: describe = arcpy.Describe(join(sde, relationship_class)) destination = describe.destinationClassNames[0] primary_key = describe.originClassKeys[0][0] foreign_key = describe.originClassKeys[1][0] destination_is_table = arcpy.Describe(join(sde, destination)).datasetType == 'Table' if destination.split('.')[-1] != dataset and destination_is_table: arcpy.AddMessage('copying {} table'.format(destination)) where = '{} IN (SELECT {} FROM {}.WILDADMIN.{} where {})'.format( foreign_key, primary_key, secrets.DATABASE, dataset, events_where) layer = arcpy.management.MakeTableView(join(sde, destination), destination + '_layer', where) arcpy.management.CopyRows(layer, join(fgdb, destination)) if arcpy.Exists(join(fgdb, relationship_class.split('.')[-1])): continue arcpy.AddMessage('creating {} relationship class'.format(relationship_class)) arcpy.env.workspace = fgdb origin = describe.originClassNames[0].split('.')[-1] cardinality = describe.cardinality arcpy.management.CreateRelationshipClass( origin, destination.split('.')[-1], relationship_class.split('.')[-1], 'SIMPLE', describe.forwardPathLabel, describe.backwardPathLabel, message_direction='BOTH', cardinality=cardinality_lookup[cardinality], origin_primary_key=primary_key, origin_foreign_key=foreign_key ) arcpy.env.workspace = None if destination_is_table: copy_related_tables(destination.split('.')[-1]) copy_related_tables('SamplingEvents') copy_related_tables('Stations') zip_fgdb(fgdb, zip_file) else: #: csvs for query_file in glob(sql_directory + '\*.sql'): csv_name = basename(query_file).replace('sql', 'csv') arcpy.AddMessage(csv_name) with open(query_file, 'r') as file: query = file.read().format(secrets.DATABASE, formatted_ids) cursor.execute(query) csv_file_path = join(arcpy.env.scratchFolder, csv_name) with open(csv_file_path, 'w', newline='') as csv_file: writer = csv.writer(csv_file) #: write headers writer.writerow([x[0] for x in cursor.description]) for row in cursor: writer.writerow(row) zip_file.write(csv_file_path, csv_name) arcpy.AddMessage(zip_file_path) connection.close() del connection return zip_file_path if __name__ == "__main__": main(sys.argv[1], sys.argv[2])
[ "stdavis@utah.gov" ]
stdavis@utah.gov
7c04bf56b1d4694c250bc1011dd80707d168fa04
632099ac0d895943cbbeb9048a2cdfcd21102411
/Novation_Impulse/__init__.py
6f5f46ce2ef01ae52ac870a2cfd2e9d926eaf2c6
[]
no_license
Toniigor/AbletonLive9_RemoteScripts
7f4bbf759a79629584413f6d1797005e8cd7f2ff
fed1e5ee61ea12ea6360107a65a6e666364353ff
refs/heads/master
2021-01-16T21:19:25.330221
2014-06-06T12:33:03
2014-06-06T12:33:03
null
0
0
null
null
null
null
UTF-8
Python
false
false
554
py
#Embedded file name: /Users/versonator/Jenkins/live/Projects/AppLive/Resources/MIDI Remote Scripts/Novation_Impulse/__init__.py from Novation_Impulse import Novation_Impulse def create_instance(c_instance): return Novation_Impulse(c_instance) from _Framework.Capabilities import * def get_capabilities(): return {CONTROLLER_ID_KEY: controller_id(vendor_id=4661, product_ids=[25], model_name='Impulse 25'), PORTS_KEY: [inport(props=[NOTES_CC, REMOTE, SCRIPT]), inport(props=[NOTES_CC, REMOTE]), outport(props=[NOTES_CC, REMOTE, SCRIPT])]}
[ "julien@julienbayle.net" ]
julien@julienbayle.net
669639ec4990e3d78768775b8b7faa030a35400e
f96937b4f6ee9865413978fb85fa2e99e2f842ff
/curriculumBuilder/__init__.py
e3d673e4e7cff389c9d9ed185f26e56dd2ca56d5
[]
no_license
mrjoshida/curriculumbuilder
8adf22d91eae2f3205b177e18dbd8a0909fb2d58
3b700d6a6fea6d1d7d7ed18d5612d6e0853449c3
refs/heads/master
2021-01-17T02:28:11.094855
2019-10-17T22:47:46
2019-10-17T22:47:46
36,847,693
6
3
null
null
null
null
UTF-8
Python
false
false
268
py
from __future__ import absolute_import # import curriculumBuilder.receivers # This will make sure the app is always imported when # Django starts so that shared_task will use this app. # from .celery import app as celery_app import curriculumBuilder.numbering_patch
[ "josh@code.org" ]
josh@code.org
8d92d5c580a5678636e059c2ef71d1e061dd7ee0
056754778e09aa5c412bbeadd77f20667b013648
/markov.py
2f7dfc8804d428b00d5fa056c84ec36f167f9234
[]
no_license
sridevich/markov_chains
22518a4cdcb2be71cd95dd8ef3d3be32912af8d0
b799430373d0ca1bfe5ffc0596a22d4dcea026fc
refs/heads/master
2021-01-11T23:23:00.780606
2017-01-12T02:05:43
2017-01-12T02:05:43
78,574,198
0
0
null
null
null
null
UTF-8
Python
false
false
5,471
py
from random import choice import twitter import os import sys api = twitter.Api( consumer_key=os.environ['TWITTER_CONSUMER_KEY'], consumer_secret=os.environ['TWITTER_CONSUMER_SECRET'], access_token_key=os.environ['TWITTER_ACCESS_TOKEN_KEY'], access_token_secret=os.environ['TWITTER_ACCESS_TOKEN_SECRET'] ) def open_and_read_file(file_path): """Takes file path as string; returns text as string. Takes a string that is a file path, opens the file, and turns the file's contents as one string of text. """ open_text_file = open(file_path).read() # print file_green_eggs return open_text_file def make_chains(text_string): """Takes input text as string; returns _dictionary_ of markov chains. A chain will be a key that consists of a tuple of (word1, word2) and the value would be a list of the word(s) that follow those two words in the input text. For example: >>> make_chains("hi there mary hi there juanita") {('hi', 'there'): ['mary', 'juanita'], ('there', 'mary'): ['hi'], ('mary', 'hi': ['there']} """ chains = {} words = text_string.split() for i in range(len(words)-2): key1 = words[i] key2 = words[i+1] key_words = (key1, key2) value_word = words[i+2] if key_words not in chains: chains[key_words] = [value_word] else: chains[key_words].append(value_word) return chains # def make_more_chains(text_string): # """Takes input text as string; returns _dictionary_ of markov chains. # A chain will be a key that consists of a tuple of (word1, word2) # and the value would be a list of the word(s) that follow those two # words in the input text. # For example: # >>> make_chains("hi there mary hi there juanita") # {('hi', 'there'): ['mary', 'juanita'], ('there', 'mary'): ['hi'], ('mary', 'hi': ['there']} # """ # chains = {} # words = text_string.split() # for i in range(len(words)): # key1 = words[i] # print key1 # key2 = words[i+1] # key_words = (key1, key2) # value_word = words[i+2] # if key_words not in chains: # chains[key_words] = [value_word] # else: # chains[key_words].append(value_word) # return chains def make_text(chains): """Takes dictionary of markov chains; returns random text.""" # key_word randon grabs key key_word = choice(chains.keys()) text = key_word[0] + " " + key_word[1] while key_word in chains: random_word = choice(chains[key_word]) # print random_word text = text + " " + random_word key_word = (key_word[1], random_word) return text[0:140] input_path = sys.argv[1] # Open the file and turn it into one long string input_text = open_and_read_file(input_path) # Get a Markov chain chains = make_chains(input_text) #chains = make_more_chains(input_text) # Produce random text random_text = make_text(chains) api.PostUpdate(random_text) # def open_and_read_file(file_path): # """Takes file path as string; returns text as string. # Takes a string that is a file path, opens the file, and turns # the file's contents as one string of text. # """ # file_green_eggs = open(file_path).read() # # print file_green_eggs # return file_green_eggs # open_and_read_file("green-eggs.txt") # def make_chains(text_string): # """Takes input text as string; returns _dictionary_ of markov chains. # A chain will be a key that consists of a tuple of (word1, word2) # and the value would be a list of the word(s) that follow those two # words in the input text. 
# For example: # >>> make_chains("hi there mary hi there juanita") # {('hi', 'there'): ['mary', 'juanita'], ('there', 'mary'): ['hi'], ('mary', 'hi': ['there']} # """ # chains = {} # words = text_string.split() # for i in range(len(words)-2): # key1 = words[i] # key2 = words[i+1] # key_words = (key1, key2) # value_word = words[i+2] # if key_words not in chains: # chains[key_words] = [value_word] # else: # chains[key_words].append(value_word) # return chains # def make_text(chains): # """Takes dictionary of markov chains; returns random text.""" # # key_word randon grabs key # key_word = choice(chains.keys()) # text = key_word[0] + " " + key_word[1] # while key_word in chains: # random_word = choice(chains[key_word]) # # print random_word # text = text + " " + random_word # key_word = (key_word[1], random_word) # return text # input_path = "green-eggs.txt" # Open the file and turn it into one long string # input_text = open_and_read_file(input_path) # # Get a Markov chain # chains = make_chains(input_text) # # Produce random text # random_text = make_text(chains) # print random_text # def make_text(chains): # """Takes dictionary of markov chains; returns random text.""" # # key_word randon grabs key # key_word = choice(chains.keys()) # text = key_word[0] + " " + key_word[1] # while key_word in chains: # random_word = choice(chains[key_word]) # # print random_word # text = text + " " + random_word # key_word = (key_word[1], random_word) # return text
[ "no-reply@hackbrightacademy.com" ]
no-reply@hackbrightacademy.com
1e2f8bbd44e82b0308ddca0516aaa3686749ac5e
b7a753180264aaba666da8aaad1bb03ccc0159da
/board/views.py
3eabda8d5d16d2813e82265dd3898bc61943f2b5
[]
no_license
boyl/lighteddjango
ec3630985a8e45bc4313f4a6bfe4d46912a58f72
cf437743d23157b6e888468ac8d2e9cc5bc24bcc
refs/heads/master
2022-12-19T08:39:18.018477
2020-06-30T03:47:42
2020-06-30T03:47:42
173,584,375
0
0
null
2022-12-08T04:51:46
2019-03-03T14:19:12
JavaScript
UTF-8
Python
false
false
5,434
py
import hashlib import requests from django.conf import settings from django.core.signing import TimestampSigner from django.contrib.auth import get_user_model from rest_framework import viewsets, authentication, permissions, filters from rest_framework.renderers import JSONRenderer from rest_framework.pagination import PageNumberPagination import django_filters from django_filters.rest_framework import DjangoFilterBackend from .models import Sprint, Task from .serializers import SprintSerializer, TaskSerializer, UserSerializer # Create your views here. User = get_user_model() class NullFilter(django_filters.BooleanFilter): """Filter on a field set as null or not.""" def filter(self, qs, value): if value is not None: return qs.filter(**{'%s__isnull' % self.field_name: value}) return qs class TaskFilter(django_filters.FilterSet): backlog = NullFilter(field_name='sprint') class Meta: model = Task fields = ('sprint', 'status', 'assigned', 'backlog',) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.filters['assigned'].extra.update({'to_field_name': User.USERNAME_FIELD}) class SprintFilter(django_filters.FilterSet): end_min = django_filters.DateFilter(field_name='end', lookup_expr='gte') end_max = django_filters.DateFilter(field_name='end', lookup_expr='lte') class Meta: model = Sprint fields = ('end_min', 'end_max',) class StandardResultsSetPagination(PageNumberPagination): """ Setting pagination for standard results. """ page_size = 25 page_size_query_param = 'page_size' max_page_size = 100 class DefaultsMixin(object): """ Default settings for view authentication, permissions, filtering and pagination. """ authentication_classes = ( authentication.BasicAuthentication, authentication.TokenAuthentication, # authentication.SessionAuthentication, ) permission_classes = ( permissions.IsAuthenticated, ) pagination_class = StandardResultsSetPagination filter_backends = ( filters.SearchFilter, filters.OrderingFilter, DjangoFilterBackend, ) class UpdateHookMixin(object): """Mixin class to send update information to the websocket server.""" @staticmethod def _build_hook_url(obj): if isinstance(obj, User): model = 'user' else: model = obj.__class__.__name__.lower() proto = 'https' if settings.WATERCOOLER_SECURE else 'http' host = settings.WATERCOOLER_SERVER return f"{proto}://{host}/{model}/{obj.pk}" def _send_hook_request(self, obj, method): url = self._build_hook_url(obj) if method in ('POST', 'PUT'): # Build the body serializer = self.get_serializer(obj) renderer = JSONRenderer() context = dict(request=self.request) body = renderer.render(serializer.data, renderer_context=context) else: body = None headers = { 'content-type': 'application/json', 'X-Signature': self._build_hook_signature(method, url, body) } try: response = requests.request(method, url, data=body, headers=headers, timeout=0.5) response.raise_for_status() except requests.exceptions.ConnectionError: # Host could not be resolved or the connection was refused pass except requests.exceptions.Timeout: # Request timed out pass except requests.exceptions.RequestException: # Server response with 4XX or 5XX status code pass @staticmethod def _build_hook_signature(method, url, body): signer = TimestampSigner(settings.WATERCOOLER_SECRET) body = hashlib.sha256(body or b'').hexdigest() value = f"{method.lower()}:{url}:{body}" return signer.sign(value) def perform_create(self, serializer): super().perform_create(serializer) self._send_hook_request(serializer.instance, 'POST') def perform_update(self, serializer): 
super().perform_update(serializer) self._send_hook_request(serializer.instance, 'PUT') def perform_destroy(self, instance): self._send_hook_request(instance, 'DELETE') super().perform_destroy(instance) class SprintViewSet(DefaultsMixin, UpdateHookMixin, viewsets.ModelViewSet): """API endpoint for listing and creating sprints.""" queryset = Sprint.objects.order_by('end') serializer_class = SprintSerializer filter_class = SprintFilter search_fields = ('name',) ordering_fields = ('end', 'name', ) class TaskViewSet(DefaultsMixin, UpdateHookMixin, viewsets.ModelViewSet): """API endpoint for listing and creating tasks.""" queryset = Task.objects.all() serializer_class = TaskSerializer filter_class = TaskFilter search_fields = ('name', 'description',) ordering_fields = ('name', 'order', 'started', 'due', 'completed',) class UserViewSet(DefaultsMixin, UpdateHookMixin, viewsets.ReadOnlyModelViewSet): """API endpoint for listing users.""" lookup_field = User.USERNAME_FIELD queryset = User.objects.order_by(User.USERNAME_FIELD) serializer_class = UserSerializer search_fields = (User.USERNAME_FIELD,)
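For context, a hedged sketch of how the receiving websocket server could validate these hooks; it is not part of this module, it assumes the receiver shares WATERCOOLER_SECRET and has django.core.signing available, and the function name and max_age value are my own.

import hashlib

from django.core.signing import BadSignature, TimestampSigner


def verify_hook(method, url, body, signature, secret, max_age=300):
    # Rebuild the value that _build_hook_signature signed and compare it against
    # the X-Signature header; TimestampSigner also rejects stale signatures.
    expected = '{}:{}:{}'.format(
        method.lower(), url, hashlib.sha256(body or b'').hexdigest())
    try:
        return TimestampSigner(secret).unsign(signature, max_age=max_age) == expected
    except BadSignature:
        return False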
[ "1228417956@qq.com" ]
1228417956@qq.com
ee726d7533579f7a9c1d89fddd7d1581375a412a
b55f70755712b26688b80a8ba3806a4124fbcd11
/LinkedList/remove_duplicate_from_sorted_linkedlist.py
ec55034979d77103ec2b1c458b9b307dd7dbf44a
[]
no_license
Shanshan-IC/Algorithm_Python
a44703a0f33370c47e3e55af70aadeae08d5a1a5
ace23976d2f1f51141498c4c4ea6bca0039b233f
refs/heads/master
2021-09-08T07:16:59.576674
2018-03-08T09:24:01
2018-03-08T09:24:01
114,254,497
0
0
null
null
null
null
UTF-8
Python
false
false
557
py
""" http://www.lintcode.com/zh-cn/problem/remove-duplicates-from-sorted-list/ Definition of ListNode class ListNode(object): def __init__(self, val, next=None): self.val = val self.next = next """ class Solution: """ @param: head: head is the head of the linked list @return: head of linked list """ def deleteDuplicates(self, head): pre = head while pre: while pre.next and pre.val == pre.next.val: pre.next = pre.next.next pre = pre.next return head
[ "shanshan.fu15@imperial.ac.uk" ]
shanshan.fu15@imperial.ac.uk
9551014bbb31c958619230f90f70227bf926855c
ad0cbe6e2f3862490ec378319ed14072735da628
/airflow_ml_dags/images/airflow-preprocess/preprocess.py
05baaf7e9e5e09e3529a1dc5307f6adc36f4482e
[]
no_license
made-ml-in-prod-2021/dronovartem
4365c05e67d6dd7bc5d798ad3b814b4687b51171
37711ea51e326f6468303daaa9852d9f1e92e9bf
refs/heads/main
2023-06-04T12:40:39.017311
2021-06-24T18:28:15
2021-06-24T18:28:15
353,791,508
0
1
null
2021-06-24T18:28:16
2021-04-01T18:28:51
Jupyter Notebook
UTF-8
Python
false
false
599
py
import os import pandas as pd import click @click.command("predict") @click.option("--input-dir") @click.option("--output-dir") def preprocess(input_dir: str, output_dir): """ Implement dummy dataset preprocessing. """ data = pd.read_csv(os.path.join(input_dir, "data.csv")) target = pd.read_csv(os.path.join(input_dir, "target.csv")) os.makedirs(output_dir, exist_ok=True) train_data = pd.concat([data, target], axis=1) train_data.to_csv(os.path.join(output_dir, "train_data.csv"), index=False) if __name__ == '__main__': preprocess()
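A hedged way to exercise the command in-process with click's test runner; the directory paths are made up and the input directory must already contain data.csv and target.csv.

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(
    preprocess,
    ['--input-dir', '/tmp/raw', '--output-dir', '/tmp/processed'],  # hypothetical paths
)
print(result.exit_code, result.output)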
[ "noreply@github.com" ]
made-ml-in-prod-2021.noreply@github.com
eabfb42337373790ebbbc5379ef69a5441583b28
2436f9f911149926af8fd7c1cd5c4e0d604987a0
/puente_quintanavides/pretensa_viga_xci.py
ae3ea59819381584f381dd1432ad80297966424e
[]
no_license
cryptopulgo/XCmodels
d185b93d01f69d52d88d8a5f965c35dc1742da22
bad6e553d919909086ab9045884594b7d8aafc06
refs/heads/master
2022-12-08T21:24:39.152883
2020-08-19T13:25:25
2020-08-19T13:25:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
319
py
# -*- coding: utf-8 def trataResultsPret(nmbComb): mdlr(dom(calculate_nodal_reactions(1)\listaReaccionesNodos(nmbComb,tagsNodosCoartados,"%7.2f","reacc","cabecera","tit"))) \nuevo_archivo_salida["salidaG0"]{"reacciones_G0.tex"} \resuelveCombEstatLin("G0") \trataResultsPret("G0") cierra_archivo_salida("salidaG0")
[ "l.pereztato@gmail.com" ]
l.pereztato@gmail.com
3109ce5a426b3c7a9bbb884949883026d046ef93
781e2692049e87a4256320c76e82a19be257a05d
/all_data/exercism_data/python/grade-school/07ecb8fb9316422683afbe71e7b89cda.py
282651815e49817683861a8da2d0e1bacaaac438
[]
no_license
itsolutionscorp/AutoStyle-Clustering
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
refs/heads/master
2020-12-11T07:27:19.291038
2016-03-16T03:18:00
2016-03-16T03:18:42
59,454,921
4
0
null
2016-05-23T05:40:56
2016-05-23T05:40:56
null
UTF-8
Python
false
false
456
py
from collections import defaultdict class School: def __init__(self, name): self.name = name self.db = defaultdict(set) def add(self, student, grade): self.db[grade].add(student) def grade(self, grade_number): return self.db[grade_number] def sort(self): register = dict() for grade, roster in self.db.items(): register[grade] = tuple(sorted(roster)) return register
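A brief usage sketch, not part of the original exercise file; the school name, students, and grades are made up.

school = School('Euler Elementary')
school.add('Ada', 3)
school.add('Grace', 3)
school.add('Alan', 5)

print(school.grade(3))   # {'Ada', 'Grace'} (a set, unordered)
print(school.sort())     # {3: ('Ada', 'Grace'), 5: ('Alan',)}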
[ "rrc@berkeley.edu" ]
rrc@berkeley.edu
859496717de268dc9117c928a2adaa60d197eb67
35fdd5b42b47a1dbe6a25f6fc1865f4e48b842a5
/evalml/tuners/tuner_exceptions.py
a1fecf7101695ca8c098a63dd3fa2cdf08aa4bf7
[ "BSD-3-Clause" ]
permissive
skvorekn/evalml
41e5426f9f7d5ad625c21b74336009894c79c7de
2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8
refs/heads/main
2023-03-27T01:42:07.691406
2021-03-19T18:53:43
2021-03-19T18:53:43
349,555,689
0
0
BSD-3-Clause
2021-03-21T14:57:01
2021-03-19T21:08:12
null
UTF-8
Python
false
false
274
py
class NoParamsException(Exception): """Raised when a tuner exhausts its search space and runs out of parameters to propose.""" pass class ParameterError(Exception): """Raised when a tuner encounters an error with the parameters being used with it.""" pass
[ "noreply@github.com" ]
skvorekn.noreply@github.com
37f7b34d2d10f3ffdc8b99dc71379799894ec17a
ef6229d281edecbea3faad37830cb1d452d03e5b
/ucsmsdk/mometa/fabric/FabricFcSan.py
31582e89aeb3a2412f8e11f8d85e697845f18cf1
[ "Apache-2.0" ]
permissive
anoop1984/python_sdk
0809be78de32350acc40701d6207631322851010
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
refs/heads/master
2020-12-31T00:18:57.415950
2016-04-26T17:39:38
2016-04-26T17:39:38
57,148,449
0
0
null
null
null
null
UTF-8
Python
false
false
4,240
py
"""This module contains the general information for FabricFcSan ManagedObject.""" import sys, os from ...ucsmo import ManagedObject from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta from ...ucsmeta import VersionMeta class FabricFcSanConsts(): ID_A = "A" ID_B = "B" ID_NONE = "NONE" UPLINK_TRUNKING_DISABLED = "disabled" UPLINK_TRUNKING_ENABLED = "enabled" class FabricFcSan(ManagedObject): """This is FabricFcSan class.""" consts = FabricFcSanConsts() naming_props = set([u'id']) mo_meta = MoMeta("FabricFcSan", "fabricFcSan", "[id]", VersionMeta.Version101e, "InputOutput", 0xff, [], ["admin", "ext-san-config", "ext-san-policy"], [u'fabricSanCloud'], [u'fabricFcSanEp', u'fabricFcSanPc', u'fabricFcoeSanEp', u'fabricFcoeSanPc', u'fabricSubGroup', u'fabricVsan', u'faultInst'], ["Get", "Set"]) prop_meta = { "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []), "config_qualifier": MoPropertyMeta("config_qualifier", "configQualifier", "string", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|vsan-count-exceeds-limit),){0,2}(defaultValue|not-applicable|vsan-count-exceeds-limit){0,1}""", [], []), "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []), "id": MoPropertyMeta("id", "id", "string", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x8, None, None, None, ["A", "B", "NONE"], []), "locale": MoPropertyMeta("locale", "locale", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|server|chassis|internal|external),){0,5}(defaultValue|unknown|server|chassis|internal|external){0,1}""", [], []), "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101e, MoPropertyMeta.CREATE_ONLY, 0x10, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []), "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []), "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []), "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []), "transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []), "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []), "uplink_trunking": MoPropertyMeta("uplink_trunking", "uplinkTrunking", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["disabled", "enabled"], []), } prop_map = { "childAction": "child_action", "configQualifier": "config_qualifier", "dn": "dn", "id": "id", "locale": "locale", "name": "name", "rn": "rn", "sacl": "sacl", "status": "status", "transport": "transport", "type": "type", "uplinkTrunking": "uplink_trunking", } def __init__(self, parent_mo_or_dn, id, 
**kwargs): self._dirty_mask = 0 self.id = id self.child_action = None self.config_qualifier = None self.locale = None self.name = None self.sacl = None self.status = None self.transport = None self.type = None self.uplink_trunking = None ManagedObject.__init__(self, "FabricFcSan", parent_mo_or_dn, **kwargs)
[ "test@cisco.com" ]
test@cisco.com
79fe8745abbd0602a0437bb480fabb4ab786264d
682319f56c17e949bab0d6e418838d33977dd760
/Assignment_5/pattern1.py
5c0520857bd5b8c307f2c17dfda35cc732667f4a
[]
no_license
DilipBDabahde/PythonExample
8eb70773a783b1f4b6cf6d7fbd2dc1302af8aa1b
669762a8d9ee81ce79416d74a4b6af1e2fb63865
refs/heads/master
2020-08-23T01:05:44.788080
2020-07-25T21:59:52
2020-07-25T21:59:52
216,511,985
1
1
null
null
null
null
UTF-8
Python
false
false
314
py
''' 2. Write a recursive program which display below pattern. Input : 5 Output : 1 2 3 4 5 ''' i = 1; def pattern(iNo): global i; if i <= iNo: print(i,end= " "); i += 1; pattern(iNo); def main(): val = int(input("Enter a val:")); pattern(val); print(); if __name__ == '__main__': main();
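The global counter i works but makes the function single-use; a hedged alternative (my own, not part of the assignment file) that threads the counter through the recursion instead.

def pattern_no_global(iNo, i=1):
    if i > iNo:
        return
    print(i, end=" ")
    pattern_no_global(iNo, i + 1)

pattern_no_global(5)   # 1 2 3 4 5
print()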
[ "noreply@github.com" ]
DilipBDabahde.noreply@github.com
98fd0ff9ef892db47051df84ce0c6dab4a5c6f62
7d1fd87e1aaf7e6b2ea72ab086a128d03ab059f1
/Python_Flask/Flask1/flask_2.py
3204f2d47e2bdba0604fdd8a5c532a2880633b24
[]
no_license
efren1990/codepy
05fb34fb608d9921cd5b1c257a9869f2d42eafae
1bd957e7a7285d459ba76e99c4bccb8dbabf8da4
refs/heads/main
2023-06-30T06:31:51.154519
2021-08-13T01:30:57
2021-08-13T01:30:57
395,486,511
0
0
null
null
null
null
UTF-8
Python
false
false
964
py
""" QUE ES FLASK---> Flask es un microframework para Python basado en Werkzeug, Jinja 2 y buenas intenciones. Y antes de preguntar: ¡es con licencia BSD! --------------METODO RUN-------------------------------------------------------------- """ # Llamado de la libreria flask la clas Flask from flask import Flask """Para utilizar flask es necesario crear una instacia de la clase Flask El objeto Flask recibe como parametro la constante __name__ """ app = Flask(__name__) @app.route('/') #wrap o decorador def index(): return 'Hola mundo' # run()-> se encarga de ejecutar el servidor # Run puede responder en el puerto que le indiquemos # La cantidad de puertos que tiene nuestra computadora es 2**16 # de los cuales los primeros 1024 ya se encuentran ocupados # debug = true permite realizar cambion sin necesidad de correr la consola en cada cambion # Validacion de buenas practicas flask if __name__ == '__main__': app.run(debug = True, port = 8000)
[ "52503339+incodeEfren@users.noreply.github.com" ]
52503339+incodeEfren@users.noreply.github.com
008dd71fd10f5620479b636d318ed0204bec3d29
cb3119cbe128fc9adcf286d257126d03b0de7a10
/tests/test_net.py
d5498289586178f08151b9c08a1eee33656809fe
[ "MIT" ]
permissive
CBJamo/skidl
a66d14598ec851eb80c1d1fd38df3513aff0acdc
c0e46aa2e75f3be8aefd585c8cbdcbd33a59d542
refs/heads/master
2020-03-09T06:30:26.759951
2018-04-08T12:54:08
2018-04-08T12:54:08
128,640,952
0
0
null
2018-04-08T12:52:59
2018-04-08T12:52:58
null
UTF-8
Python
false
false
412
py
import pytest from skidl import * from .setup_teardown import * def test_nets_1(): gnd = Net('GND') a = Net('A') b = Net('B') c = Net() p = Pin() assert len(default_circuit.get_nets()) == 0 assert len(a) == 0 assert len(b) == 0 assert len(c) == 0 a += p assert len(default_circuit.get_nets()) == 1 assert len(a) == 1 assert len(b) == 0 assert len(c) == 0
[ "devb@xess.com" ]
devb@xess.com
20f5f50e20a6e339fe8174eaf494840bb10d8f6b
a312bf5148945a19bb6d30c1f0e97d3edc021af2
/RestFrameWork/api/serializer.py
d9ff420cf6e881549e5c71899e923275d6171933
[]
no_license
Shikhar0907/Create-API
252cdb632765f5d5b7a03213b942bfc9c5b979de
c8f13f309d0aa668091d615946b52bea66316d36
refs/heads/master
2020-04-03T00:51:57.367335
2018-10-27T01:16:00
2018-10-27T01:16:00
154,911,231
0
0
null
null
null
null
UTF-8
Python
false
false
635
py
from rest_framework import serializers from RestFrameWork.models import Status class StatusSerializer(serializers.ModelSerializer): class Meta: model = Status fields = [ 'id', 'user', 'content', 'image' ] read_only_fields = ['user'] def validate(self, data): content = data.get("content",None) if content == "": content = None image = data.get("image",None) if content is None and image is None: raise(serializers.ValidationError("content or image is not Present")) return(data)
[ "shikharsuman59@gmail.com" ]
shikharsuman59@gmail.com
e2fb33449cb17a9b98a90fba8046aa3d8eddddcf
2324d8e4544a9b813153ce0ed0f858972ea7f909
/66-加一.py
35964e18d54db38d2430e97ff639ad548ef65d3c
[]
no_license
Terry-Ma/Leetcode
af8a4ad8059975f8d12b0351610336f1f5f01097
cc7f41e2fb3ed5734c2a5af97e49a5bc17afbceb
refs/heads/master
2021-08-10T16:40:20.482851
2021-07-03T08:35:56
2021-07-03T08:35:56
225,814,239
0
1
null
null
null
null
UTF-8
Python
false
false
451
py
class Solution: def plusOne(self, digits: List[int]) -> List[int]: result = 0 flag = 1 for i in range(len(digits) - 1, -1, -1): if flag == 1: if digits[i] == 9: digits[i] = 0 else: digits[i] = digits[i] + 1 flag = 0 break if flag == 1: digits.insert(0, 1) return digits
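A hedged sanity check of the carry handling above (my own, not part of the solution); it assumes `from typing import List` is in scope, as it is in the LeetCode environment this class targets.

print(Solution().plusOne([1, 2, 9]))   # [1, 3, 0]
print(Solution().plusOne([9, 9, 9]))   # [1, 0, 0, 0]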
[ "rssmyq@aliyun.com" ]
rssmyq@aliyun.com