blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e5f55207dd9a043e94437287cbd5b94a341aeb9a
|
5ec3dc6d172d758f9f547686b68cbbe903ab3161
|
/test/no_running_jobs_test.py
|
7740a6977d3edacfdbd71a677bae1499bce61a23
|
[] |
no_license
|
dixudx/jenkinsflow
|
ea8bdf4b8abdfb06ab6e05f5c5a83a1c0744f849
|
2c07f8fc2951d9167dcd08ae2e1f6a8afc32f7f5
|
refs/heads/master
| 2020-12-26T00:25:27.092813
| 2015-05-19T19:41:47
| 2015-05-19T19:41:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,964
|
py
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from pytest import raises
from jenkinsflow.flow import serial, JobNotIdleException
from jenkinsflow.mocked import hyperspeed
from .cfg import ApiType
from .framework import api_select
from .framework.utils import assert_lines_in
def test_no_running_jobs(capsys):
    """A checked invoke of a job that is still RUNNING must raise JobNotIdleException."""
    with api_select.api(__file__, login=True) as api:
        api.flow_job()
        api.job('j1', exec_time=50, max_fails=0, expect_invocations=1, expect_order=None, invocation_delay=0, unknown_result=True)

        # Start 'j1' unchecked so this flow finishes while the job keeps running.
        with serial(api, timeout=70, job_name_prefix=api.job_name_prefix) as flow:
            flow.invoke_unchecked('j1')

        stdout, _ = capsys.readouterr()
        assert_lines_in(stdout, "unchecked job: 'jenkinsflow_test__no_running_jobs__j1' UNKNOWN - RUNNING")

        # Make sure job has actually started before entering new flow
        hyperspeed.sleep(1)

        # A second flow that invokes the same job checked must refuse to run it.
        with raises(JobNotIdleException) as exc_info:
            with serial(api, timeout=70, job_name_prefix=api.job_name_prefix) as flow:
                flow.invoke('j1')

        assert "job: 'jenkinsflow_test__no_running_jobs__j1' is in state RUNNING. It must be IDLE." in exc_info.value.message
def test_no_running_jobs_unchecked(capsys):
    """Even an unchecked invoke must reject a job that is still RUNNING."""
    with api_select.api(__file__, login=True) as api:
        api.flow_job()
        api.job('j1', exec_time=50, max_fails=0, expect_invocations=1, expect_order=None, invocation_delay=0, unknown_result=True)

        # First flow starts 'j1' unchecked and returns while the job is still running.
        with serial(api, timeout=70, job_name_prefix=api.job_name_prefix) as flow:
            flow.invoke_unchecked('j1')

        stdout, _ = capsys.readouterr()
        assert_lines_in(stdout, "unchecked job: 'jenkinsflow_test__no_running_jobs_unchecked__j1' UNKNOWN - RUNNING")

        # Give the job time to actually start before entering the second flow.
        hyperspeed.sleep(1)

        with raises(JobNotIdleException) as exc_info:
            with serial(api, timeout=70, job_name_prefix=api.job_name_prefix) as flow:
                flow.invoke_unchecked('j1')

        assert "unchecked job: 'jenkinsflow_test__no_running_jobs_unchecked__j1' is in state RUNNING. It must be IDLE." in exc_info.value.message
def test_no_running_jobs_jobs_allowed():
    """With require_idle=False a flow may invoke a job that is already RUNNING."""
    with api_select.api(__file__, login=True) as api:
        api.flow_job()

        # The MOCK api never actually runs the second (real) invocation.
        is_mock = api.api_type == ApiType.MOCK
        exp_invocations = 1 if is_mock else 2
        unknown_result = True if is_mock else False
        api.job('j1', exec_time=20, max_fails=0, expect_invocations=exp_invocations, expect_order=None,
                invocation_delay=0, unknown_result=unknown_result)

        # Start 'j1' unchecked so it is still running when the next flow begins.
        with serial(api, timeout=70, job_name_prefix=api.job_name_prefix) as flow:
            flow.invoke_unchecked('j1')

        hyperspeed.sleep(1)

        # TODO
        if api.api_type != ApiType.MOCK:
            with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, require_idle=False) as flow:
                flow.invoke('j1')
|
[
"lhn@hupfeldtit.dk"
] |
lhn@hupfeldtit.dk
|
7d3fc3ee1fbadfbfdeae383c58c42296cb0e2128
|
73b158f51285300c1d3456b7af9163939ee206f2
|
/DevOps/sprint03/t00_lambda/expression.py
|
0ddfdb9345aef1a2a08f49f12d1afab8728d3beb
|
[] |
no_license
|
nnocturnnn/DevOps
|
2e332b3552a5b294b36d2af7de854aa18f2da46f
|
173c75938e65be8fbbb5c02c3d655d09df9a2931
|
refs/heads/master
| 2023-06-11T07:21:14.097930
| 2021-06-30T13:58:15
| 2021-06-30T13:58:15
| 352,070,911
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
def result(value_a, value_b, value_n):
    """Return True when value_n is divisible by both value_a and value_b.

    Replaces the original ``result = lambda a, b, n: ...`` assignment:
    PEP 8 (E731) says not to bind a lambda to a name, and the lambda's
    parameters shadowed the module-level a/b/n. The call signature
    result(a, b, n) is unchanged.
    """
    return value_n % value_a == 0 and value_n % value_b == 0


n = int(input('n: '))
a = int(input('a: '))
b = int(input('b: '))
print(result(a, b, n))
|
[
"vikchehovich@gmail.com"
] |
vikchehovich@gmail.com
|
14d21ba34e1b8337cb0439ea712b203c5317703c
|
2a8abd5d6acdc260aff3639bce35ca1e688869e9
|
/telestream_cloud_qc_sdk/telestream_cloud_qc/models/audio_loudness_itest.py
|
387597f5a62e6090eec6343a0872aa4c534e52f5
|
[
"MIT"
] |
permissive
|
Telestream/telestream-cloud-python-sdk
|
57dd2f0422c83531e213f48d87bc0c71f58b5872
|
ce0ad503299661a0f622661359367173c06889fc
|
refs/heads/master
| 2021-01-18T02:17:44.258254
| 2020-04-09T11:36:07
| 2020-04-09T11:36:07
| 49,494,916
| 0
| 0
|
MIT
| 2018-01-22T10:07:49
| 2016-01-12T11:10:56
|
Python
|
UTF-8
|
Python
| false
| false
| 7,273
|
py
|
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class AudioLoudnessItest(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps attribute name -> declared OpenAPI type.
    openapi_types = {
        'loudness_level': 'float',
        'loudness_tolerance': 'float',
        'mode': 'LoudnessMode',
        'channels': 'Channels',
        'reject_on_error': 'bool',
        'do_correction': 'bool'
    }

    # attribute_map maps attribute name -> JSON key in the API definition.
    attribute_map = {
        'loudness_level': 'loudness_level',
        'loudness_tolerance': 'loudness_tolerance',
        'mode': 'mode',
        'channels': 'channels',
        'reject_on_error': 'reject_on_error',
        'do_correction': 'do_correction'
    }

    def __init__(self, loudness_level=None, loudness_tolerance=None, mode=None, channels=None, reject_on_error=None, do_correction=None, local_vars_configuration=None):  # noqa: E501
        """AudioLoudnessItest - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the properties defined below.
        self._loudness_level = None
        self._loudness_tolerance = None
        self._mode = None
        self._channels = None
        self._reject_on_error = None
        self._do_correction = None
        self.discriminator = None

        # Route every non-None constructor argument through its property setter.
        for attr_name, attr_value in (('loudness_level', loudness_level),
                                      ('loudness_tolerance', loudness_tolerance),
                                      ('mode', mode),
                                      ('channels', channels),
                                      ('reject_on_error', reject_on_error),
                                      ('do_correction', do_correction)):
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def loudness_level(self):
        """float: the loudness_level of this AudioLoudnessItest."""
        return self._loudness_level

    @loudness_level.setter
    def loudness_level(self, loudness_level):
        self._loudness_level = loudness_level

    @property
    def loudness_tolerance(self):
        """float: the loudness_tolerance of this AudioLoudnessItest."""
        return self._loudness_tolerance

    @loudness_tolerance.setter
    def loudness_tolerance(self, loudness_tolerance):
        self._loudness_tolerance = loudness_tolerance

    @property
    def mode(self):
        """LoudnessMode: the mode of this AudioLoudnessItest."""
        return self._mode

    @mode.setter
    def mode(self, mode):
        self._mode = mode

    @property
    def channels(self):
        """Channels: the channels of this AudioLoudnessItest."""
        return self._channels

    @channels.setter
    def channels(self, channels):
        self._channels = channels

    @property
    def reject_on_error(self):
        """bool: the reject_on_error of this AudioLoudnessItest."""
        return self._reject_on_error

    @reject_on_error.setter
    def reject_on_error(self, reject_on_error):
        self._reject_on_error = reject_on_error

    @property
    def do_correction(self):
        """bool: the do_correction of this AudioLoudnessItest."""
        return self._do_correction

    @do_correction.setter
    def do_correction(self, do_correction):
        self._do_correction = do_correction

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _convert(value):
            # Recurse into lists, nested models and dicts of models.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: _convert(getattr(self, attr)) for attr in self.openapi_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Instances are equal when they are the same model type with equal dicts."""
        if not isinstance(other, AudioLoudnessItest):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Logical inverse of __eq__."""
        return not self == other
|
[
"cloudsupport@telestream.net"
] |
cloudsupport@telestream.net
|
937175e4b5291adf7936b6fa829b3bbd28c7c340
|
3af2998fe7bc3c48fbe6eae476f7e0ec5bfc0ca6
|
/control_flow/while_loop.py
|
503c2959186fb69a16e04e59916b7b694844032c
|
[] |
no_license
|
east825/python-inference-playground
|
22acb8f2c71eb07e13293a9fec1d67a6f5aa25cf
|
f60387604a1c535ad30b7f3f44acf08cbd7d88c7
|
refs/heads/master
| 2020-06-01T01:29:00.399986
| 2014-04-15T12:28:18
| 2014-04-15T12:28:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
# Type-inference playground fixtures: each block assigns inside a
# `while False:` loop whose body never executes, so the variable may be
# unbound when the following print() runs — that ambiguity is the point.
# NOTE(review): leading whitespace was lost in this copy; the if/else
# nesting below is a reconstruction — confirm against the original file.
while False:
    if True:
        a1 = 42
        break
    else:
        a1 = 'spam'
# a1 is never bound at runtime (the loop body is unreachable).
print(a1)

while False:
    if True:
        a2 = 42
    else:
        a2 = 'spam'
print(a2)

a3 = 42  # bound before the loop, so a3 is always defined afterwards
while False:
    if True:
        a3 = 'spam'
print(a3)

while False:
    if True:
        a4 = 42
    else:
        a4 = 'spam'
print(a4)
|
[
"mikhail.golubev@jetbrains.com"
] |
mikhail.golubev@jetbrains.com
|
b4571590ec6a3e3ec47fcc2114275054d35df44f
|
d1ddb9e9e75d42986eba239550364cff3d8f5203
|
/google-cloud-sdk/lib/surface/container/builds/describe.py
|
c3386e7ed0435df473c1cbd30730d4657cc15fba
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bopopescu/searchparty
|
8ecd702af0d610a7ad3a8df9c4d448f76f46c450
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
refs/heads/master
| 2022-11-19T14:44:55.421926
| 2017-07-28T14:55:43
| 2017-07-28T14:55:43
| 282,495,798
| 0
| 0
|
Apache-2.0
| 2020-07-25T17:48:53
| 2020-07-25T17:48:52
| null |
UTF-8
|
Python
| false
| false
| 2,193
|
py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Describe build command."""
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
class Describe(base.DescribeCommand):
    """Get information about a particular build."""

    @staticmethod
    def Args(parser):
        """Register flags for this command.

        Args:
          parser: An argparse.ArgumentParser-like object. It is mocked out in order
              to capture some information, but behaves like an ArgumentParser.
        """
        build_help = ('The build to describe. The ID of the build is printed at the '
                      'end of the build submission process, or in the ID column when '
                      'listing builds.')
        parser.add_argument(
            'build',
            completion_resource='cloudbuild.projects.builds',
            list_command_path='container builds list --uri',
            help=build_help,
        )

    def Run(self, args):
        """This is what gets called when the user runs this command.

        Args:
          args: an argparse namespace. All the arguments that were provided to this
            command invocation.

        Returns:
          Some value that we want to have printed later.
        """
        client = cloudbuild_util.GetClientInstance()
        # Resolve the build argument against the cloudbuild collection,
        # defaulting the project from the active gcloud configuration.
        build_ref = resources.REGISTRY.Parse(
            args.build,
            params={'projectId': properties.VALUES.core.project.GetOrFail},
            collection='cloudbuild.projects.builds')
        request = client.MESSAGES_MODULE.CloudbuildProjectsBuildsGetRequest(
            projectId=build_ref.projectId, id=build_ref.id)
        return client.projects_builds.Get(request)
|
[
"vinvivo@users.noreply.github.com"
] |
vinvivo@users.noreply.github.com
|
86cfe8a6f28681768008e205860dc50ea646a073
|
76af5f63e173850a461dd104d696a3ad86958b6d
|
/ObjectDetectionDeps/Generate_Labelmap.py
|
543168988a48df229884cc695dd2deda73776def
|
[] |
no_license
|
Danny-Dasilva/Tensorflow_Object_Detection
|
599b76d86918b1425a8d9e35d6dc5644224e6692
|
b0386dfac730b516594d511849560ff59a2bf979
|
refs/heads/master
| 2022-03-30T11:34:08.595899
| 2020-02-08T16:35:20
| 2020-02-08T16:35:20
| 197,986,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
import os
import csv

# Training labels CSV produced earlier; IMAGEPATH must be set in the environment.
path = os.environ['IMAGEPATH'] + '/Train_labels.csv'

# Collect the class name of every labelled row.
col = []
with open(path, newline='') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        col.append(row['class'])

# NOTE: set() de-duplicates but its iteration order is arbitrary, so the
# assigned ids may differ between runs (behavior preserved from original).
classes = list(set(col))
print(classes)

# Write the TensorFlow label map; ids are 1-based.
# Files are now opened via context managers so they are closed even on error
# (the original left both handles open until the end of the script).
with open("labelmap.pbtxt", "w") as pbtxt_label:
    for count, label in enumerate(classes, start=1):
        pbtxt_label.write("item {\n")
        pbtxt_label.write(" id: %s\n" % (count))
        pbtxt_label.write(" name: '%s'\n" % (label))
        pbtxt_label.write("}\n")
        pbtxt_label.write("\n")

# Write the plain-text labels file; ids are 0-based.
with open("labels.txt", "w") as txt_label:
    for count, label in enumerate(classes):
        txt_label.write("%s %s\n" % (count, label))
|
[
"yahchayildasilva@gmail.com"
] |
yahchayildasilva@gmail.com
|
8f608eb54cc99e4c496150d0edecc71a52d4e030
|
3cda2dc11e1b7b96641f61a77b3afde4b93ac43f
|
/nni/experiment/config/utils/__init__.py
|
c4b8b586d0953435188171ce60154e6e190380ee
|
[
"MIT"
] |
permissive
|
Eurus-Holmes/nni
|
6da51c352e721f0241c7fd26fa70a8d7c99ef537
|
b84d25bec15ece54bf1703b1acb15d9f8919f656
|
refs/heads/master
| 2023-08-23T10:45:54.879054
| 2023-08-07T02:39:54
| 2023-08-07T02:39:54
| 163,079,164
| 3
| 2
|
MIT
| 2023-08-07T12:35:54
| 2018-12-25T12:04:16
|
Python
|
UTF-8
|
Python
| false
| false
| 237
|
py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Utility functions for experiment config classes.
Check "public.py" to see which functions you can utilize.
"""
from .public import *
from .internal import *
|
[
"noreply@github.com"
] |
Eurus-Holmes.noreply@github.com
|
891869c00f24639fa8d33f4d0a3dea0f62cc2f18
|
5b1eb22194cb2f4c9df63765f78a6998a6ad3de2
|
/src/helpsys.py
|
ad173e7ce6e17082de7d532ab17840bb10f89ca2
|
[] |
no_license
|
Jawmo/akriosmud
|
85c2ecd520fd15ba86d0210b018055146b9e2192
|
aac434919586f5590f089e8e87e0f2e946a80aa9
|
refs/heads/master
| 2020-10-01T23:26:47.290481
| 2019-12-09T01:11:05
| 2019-12-09T01:11:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,804
|
py
|
#! usr/bin/env python
# Project: Akrios
# Filename: helpsys.py
#
# File Description: Module to handle the help system.
#
# By: Jubelo
from collections import namedtuple
import glob
import logging
import json
import os
import olc
import world
log = logging.getLogger(__name__)
WRITE_NEW_FILE_VERSION = False
# Define some named tuples for various Help file values
Section = namedtuple("Section", "name")
sections = {"player": Section("player"),
"administrative": Section("administrative"),
"builder": Section("builder"),
"deity": Section("deity")}
class Help(olc.Editable):
    """One help entry backed by a JSON file; editable through the olc system."""
    # Marker and format version stamped into each saved JSON help file.
    CLASS_NAME = "__Help__"
    FILE_VERSION = 1

    def __init__(self, path):
        """Create a help entry for *path*, loading it if the file exists."""
        super().__init__()
        self.path = path
        self.json_version = Help.FILE_VERSION
        self.json_class_name = Help.CLASS_NAME
        self.builder = None
        self.creator = ""
        self.viewable = ""      # "true"/"false" string, see commands table below
        self.keywords = []
        self.topics = ""
        self.section = ""
        self.description = ""
        # Editable-field table consumed by the olc editor:
        # field name -> (input type, allowed values or None).
        self.commands = {"viewable": ("string", ["true", "false"]),
                         "creator": ("string", None),
                         "keywords": ("list", None),
                         "topics": ("string", None),
                         "section": ("string", sections),
                         "description": ("description", None)}
        if os.path.exists(path):
            self.load()

    def to_json(self):
        """Serialize this entry to a JSON string (version 1 layout only).

        NOTE(review): implicitly returns None for any other json_version.
        """
        if self.json_version == 1:
            jsonable = {"json_version": self.json_version,
                        "json_class_name": self.json_class_name,
                        "creator": self.creator,
                        "viewable": self.viewable,
                        "keywords": self.keywords,
                        "topics": self.topics,
                        "section": self.section,
                        "description": self.description}
            return json.dumps(jsonable, sort_keys=True, indent=4)

    def load(self):
        """Populate attributes from the JSON file at self.path (json files only)."""
        log.debug(f"Loading help file: {self.path}")
        if self.path.endswith("json"):
            with open(self.path, "r") as thefile:
                help_file_dict = json.loads(thefile.read())
                # Every key in the file becomes an attribute on this object.
                for eachkey, eachvalue in help_file_dict.items():
                    setattr(self, eachkey, eachvalue)

    def save(self):
        """Write this help entry back to its JSON file."""
        with open(f"{self.path}", "w") as thefile:
            thefile.write(self.to_json())

    def display(self):
        """Return an editor-facing summary ({B/{x/{y are color codes; {{ escapes a brace)."""
        return (f"{{BCreator{{x: {self.creator}\n"
                f"{{BViewable{{x: {self.viewable}\n"
                f"{{BKeywords{{x: {', '.join(self.keywords)}\n"
                f"{{BTopics{{x: {self.topics}\n"
                f"{{BSection{{x: {self.section}\n"
                f" {{y{', '.join(sections)}\n"
                f"{{BDescription{{x:\n\r"
                f"{self.description[:190]}|...{{x\n\r")
# Global keyword -> Help registry, populated by init().
helpfiles = {}


def init():
    """Load every JSON help file from the world help directory into helpfiles."""
    log.info("Initializing all help files.")
    for help_path in glob.glob(os.path.join(world.helpDir, "*.json")):
        entry = Help(help_path)
        # Register the entry once per keyword so lookups hit directly.
        for keyword in entry.keywords:
            helpfiles[keyword] = entry
        if WRITE_NEW_FILE_VERSION:
            entry.save()
def reload():
    """Clear the help registry and re-read all help files from disk.

    Bug fix: the original ``helpfiles = {}`` only bound a *local* name, so
    the module-level registry was never emptied — entries whose keywords
    were removed on disk survived a reload. Mutate the shared dict in
    place instead.
    """
    helpfiles.clear()
    init()
def get_help(key, server=False):
    """Return the help text for *key*, or a polite fallback message.

    Entries marked non-viewable are only returned when *server* is True.
    (A miss is logged as MISSING HELP FILE even when the entry exists but
    is not viewable — preserved from the original behavior.)
    """
    key = key.lower()
    entry = helpfiles.get(key) if key else None
    if entry is not None and (entry.viewable.lower() == "true" or server):
        return entry.description
    log.warning(f"MISSING HELP FILE: {key}")
    return "We do not appear to have a help file for that topic. "\
           "We have however logged the attempt and will look into creating "\
           "a help file for that topic as soon as possible.\n\r"
|
[
"phippsb@gmail.com"
] |
phippsb@gmail.com
|
6da9faa43719b34fe1f5824aa6c271c993fb4534
|
7ad0ea6e17c6505c419f70b956a06f36b734779b
|
/BizchoolLab/project/urls.py
|
e8a928037905962c68a0c309904a81b359a0d1ac
|
[] |
no_license
|
yoongyo/BizLab
|
34cb2e6386030fb091853d90065063367ae32521
|
dfe5f1e69d6a711e96f0f456f36ecfbccf010892
|
refs/heads/master
| 2020-04-12T13:08:55.782891
| 2018-12-20T02:09:54
| 2018-12-20T02:09:54
| 162,513,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
from django.urls import path, re_path
from . import views

# URL routes for the project app, resolved top-down by Django.
urlpatterns = [
    re_path(r'^new/$', views.project_new, name="project_new"),                  # create a project
    re_path(r'^$', views.project_list, name="project_list"),                    # list all projects
    re_path(r'^(?P<pk>\d+)/$', views.project_detail, name="project_detail"),    # detail by primary key
    re_path(r'^(?P<pk>\d+)/Edit/$', views.project_edit, name="project_edit"),   # edit by primary key
]
|
[
"jyg0172@naver.com"
] |
jyg0172@naver.com
|
d90fcac0e12cd0f321dbfa11976d0074cb2a681c
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_75/200.py
|
fffe0758681ce42de20ca4fef4e35391db502cce
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
def checkCase(data):
    """Simulate one 'Magicka' test case (Code Jam 2011 Qualification B).

    data is one whitespace-split input line:
      [C, C combine rules, D, D oppose rules, invoke string]
    A combine rule "XYZ" means adjacent X,Y become Z; an oppose pair "XY"
    clears the whole element list when both are present.

    Returns the final element list formatted like "[Q, I]".

    Rewritten to drop the original's deliberate ``1/0`` ZeroDivisionError
    plus bare ``except: pass`` used as control flow, and the Python-2-only
    ``xrange`` (``range`` behaves the same here on both 2 and 3).
    """
    # Parse the C combine rules (first three characters of each token).
    nonbase = []
    for i in range(int(data[0])):
        rule = data[i + 1]
        nonbase.append((rule[0], rule[1], rule[2]))
    data = data[int(data[0]) + 1:]

    # Parse the D opposition pairs.
    opposed = []
    for i in range(int(data[0])):
        pair = data[i + 1]
        opposed.append((pair[0], pair[1]))

    # The last remaining token is the invocation string.
    invoke = data[-1]

    elements = []
    for cmd in invoke:
        handled = False
        if len(elements) > 0:
            # Combine with the previous element if a rule matches in either order.
            for n in nonbase:
                if (n[0] == elements[-1] and cmd == n[1]) or (n[1] == elements[-1] and cmd == n[0]):
                    elements[-1] = n[2]
                    handled = True
                    break
            if not handled:
                # An opposed partner anywhere in the list clears everything.
                for o in opposed:
                    if (o[0] in elements and cmd == o[1]) or (o[1] in elements and cmd == o[0]):
                        elements = []
                        handled = True
                        break
        if not handled:
            elements.append(cmd)
    return str(elements).replace("'", "")
# Read the input file, dropping the first line (the case count).
with open("B-large.in", "r") as infile:
    data = infile.read().splitlines()[1:]

# Solve each case and write "Case #i: result" lines.
with open("out.txt", "w") as out:
    for case_number, line in enumerate(data, start=1):
        out.write("Case #%i: %s\n" % (case_number, checkCase(line.split(" "))))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
f2536c6d3f382ecd5d7c0ab7aa19a39a61db1aff
|
453d2e699d218fdb3bc1e535a707988194ac6717
|
/lib/opengl/postproc/base.py
|
e38f2f67f6835325a7416c113e3b01a34d8e7a81
|
[
"MIT"
] |
permissive
|
defgsus/thegame
|
d54ffcd343c7e1805d2c11e24cd38b02243e73d4
|
38a627d9108f1418b94b08831fd640dd87fbba83
|
refs/heads/master
| 2023-07-23T06:32:40.297591
| 2022-04-11T12:02:32
| 2022-04-11T12:02:32
| 127,875,178
| 1
| 0
|
MIT
| 2023-07-06T22:07:07
| 2018-04-03T08:21:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,264
|
py
|
from ..RenderSettings import RenderSettings
from ..RenderNode import RenderNode
from ..ScreenQuad import ScreenQuad
from ..core.Shader import Shader
class PostProcNode(RenderNode):
    """Base node for post-processing passes drawn through a fullscreen quad."""

    def __init__(self, name):
        super().__init__(name)
        self.quad = ScreenQuad(name="pp-quad-%s" % self.name)
        # When True, shader source is (re)compiled on the next render call.
        self.do_compile = True

    def release(self):
        """Release the resources held by the quad."""
        self.quad.release()

    def render(self, rs: RenderSettings, pass_num: int):
        """Draw one post-processing pass at the current render size."""
        if self.do_compile:
            self.quad.set_shader_code(self.get_code())
            self.do_compile = False

        shader = self.quad.drawable.shader
        # Bind texture units 0..3 to the u_tex1..u_tex4 sampler uniforms.
        for unit in range(4):
            shader.set_uniform("u_tex%s" % (unit + 1), unit)
        shader.set_uniform("u_time", rs.time)

        self.update_uniforms(shader, rs, pass_num=pass_num)
        self.quad.draw(rs.render_width, rs.render_height)

    def get_code(self):
        """Return the shader source for this pass; subclasses must implement."""
        raise NotImplementedError

    def update_uniforms(self, shader: Shader, rs: RenderSettings, pass_num: int):
        """Hook for subclasses to set additional uniforms; default does nothing."""
        pass
|
[
"s.berke@netzkolchose.de"
] |
s.berke@netzkolchose.de
|
038243668ac16b39e17fbc3ecc4dfe6eb39856d0
|
2031771d8c226806a0b35c3579af990dd0747e64
|
/pyobjc-framework-Photos/PyObjCTest/test_phphotolibrary.py
|
7174dd32bc9163fff070ec2f446bfa8aa62aa0cf
|
[
"MIT"
] |
permissive
|
GreatFruitOmsk/pyobjc-mirror
|
a146b5363a5e39181f09761087fd854127c07c86
|
4f4cf0e4416ea67240633077e5665f5ed9724140
|
refs/heads/master
| 2018-12-22T12:38:52.382389
| 2018-11-12T09:54:18
| 2018-11-12T09:54:18
| 109,211,701
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
from PyObjCTools.TestSupport import *
import sys
if sys.maxsize > 2 ** 32:
import Photos
class TestPHPhotoLibrary (TestCase):
    def testConstants(self):
        # PHAuthorizationStatus* values are sequential starting at 0.
        expected = (("PHAuthorizationStatusNotDetermined", 0),
                    ("PHAuthorizationStatusRestricted", 1),
                    ("PHAuthorizationStatusDenied", 2),
                    ("PHAuthorizationStatusAuthorized", 3))
        for name, value in expected:
            self.assertEqual(getattr(Photos, name), value)

    @min_sdk_level('10.13')
    def testProtocols(self):
        objc.protocolNamed('PHPhotoLibraryChangeObserver')

    @min_os_level('10.13')
    def testMethods(self):
        library = Photos.PHPhotoLibrary
        self.assertArgIsBlock(library.requestAuthorization_, 0, b'v' + objc._C_NSInteger)
        self.assertArgIsBlock(library.performChanges_completionHandler_, 1, b'vZ@')
        self.assertArgIsOut(library.performChangesAndWait_error_, 1)
        self.assertResultIsBOOL(library.performChangesAndWait_error_)
# Run the tests when executed directly; main is provided by the star import
# above — presumably PyObjCTools.TestSupport (verify).
if __name__ == "__main__":
    main()
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
30fb72f40582c23a9f9dd19a02d75877810dce08
|
44b9fa8d1113299d327d087da73febf26bef61e7
|
/WINDOW_openMDAO/AEP/FastAEP/farm_energy/wake_model_mean_new/wake_overlap.py
|
ff871ca75c2e2612402c55bb235094bbeda88a94
|
[] |
no_license
|
sebasanper/WINDOW_openMDAO
|
828e6d38546e706d23e4920b1c6e857c6be10825
|
3779fa8380874bc2cd7380df90339b37806a6a60
|
refs/heads/master
| 2023-04-12T22:09:42.951295
| 2023-04-05T08:54:15
| 2023-04-05T08:54:15
| 107,442,976
| 3
| 9
| null | 2019-01-20T16:32:42
| 2017-10-18T17:47:04
|
HTML
|
UTF-8
|
Python
| false
| false
| 789
|
py
|
from math import sqrt
def root_sum_square(array_deficits):
    """Combine individual wake deficits as the root of the sum of squares.

    This is one model, root sum square of individual wind speed deficits.
    """
    squares_total = 0
    for deficit in array_deficits:
        squares_total += deficit ** 2.0
    return sqrt(squares_total)
def multiplied(array_deficits):
    """Combine wake deficits by multiplying the individual factors together."""
    product = 1.0
    for factor in array_deficits:
        product *= factor
    return product
def summed(array_deficits):
    """Combine wake deficits by summing them, capped at a total of 1.0."""
    return min(sum(array_deficits), 1.0)
def maximum(array_deficits):
    """Combine wake deficits by keeping only the single largest deficit."""
    largest = max(array_deficits)
    return largest
if __name__ == '__main__':
    # Sample deficits for a manual smoke test; the calls below are
    # Python 2 print statements that were commented out.
    deficits = [0.3, 0.4]
    # print root_sum_square(deficits)
    # print multiplied(deficits)
    # print summed(deficits)
    # print maximum(deficits)
|
[
"s.sanchezperezmoreno@tudelft.nl"
] |
s.sanchezperezmoreno@tudelft.nl
|
a3058160dea228fc765e45cdcec942bd35ec57a9
|
148072ce210ca4754ea4a37d83057e2cf2fdc5a1
|
/src/core/w3af/w3af/core/data/db/tests/test_dbms.py
|
cf0aed0578e3412ae13b214eeeea0442098cd14d
|
[] |
no_license
|
ycc1746582381/webfuzzer
|
8d42fceb55c8682d6c18416b8e7b23f5e430c45f
|
0d9aa35c3218dc58f81c429cae0196e4c8b7d51b
|
refs/heads/master
| 2021-06-14T18:46:59.470232
| 2017-03-14T08:49:27
| 2017-03-14T08:49:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,053
|
py
|
# -*- coding: UTF-8 -*-
"""
Copyright 2013 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
import string
import os
from itertools import repeat, starmap
from random import choice
from w3af.core.data.db.dbms import SQLiteDBMS, get_default_temp_db_instance
from w3af.core.controllers.exceptions import DBException, NoSuchTableException
from w3af.core.controllers.misc.temp_dir import (get_temp_dir,
create_temp_dir,
remove_temp_dir)
def get_temp_filename():
    """Return a unique '*.w3af.temp_db' path inside the w3af temp directory.

    Fixes: ``string.letters`` is Python-2-only (removed in Python 3) and
    locale-dependent; ``string.ascii_letters`` exists in both 2 and 3 and
    is fixed to a-zA-Z. The convoluted ``starmap(choice, repeat(...))``
    construction is replaced by a plain generator expression with the
    same effect (18 random letters).
    """
    temp_dir = get_temp_dir()
    fname = ''.join(choice(string.ascii_letters) for _ in range(18))
    filename = os.path.join(temp_dir, fname + '.w3af.temp_db')
    return filename
class TestDBMS(unittest.TestCase):
    """Integration tests for SQLiteDBMS against throw-away on-disk databases."""

    def setUp(self):
        # Each test gets a fresh temporary directory for its DB files.
        create_temp_dir()

    def tearDown(self):
        remove_temp_dir()

    def test_open_error(self):
        # '/' is a directory, so SQLite cannot open it as a database file.
        invalid_filename = '/'
        self.assertRaises(DBException, SQLiteDBMS, invalid_filename)

    def test_simple_db(self):
        db = SQLiteDBMS(get_temp_filename())
        # create_table/execute return future-like objects; .result() blocks
        # until the statement has actually run.
        db.create_table('TEST', set([('id', 'INT'), ('data', 'TEXT')])).result()
        db.execute('INSERT INTO TEST VALUES (1,"a")').result()

        # Values come back as strings from this wrapper.
        self.assertIn(('1', 'a'), db.select('SELECT * from TEST'))
        self.assertEqual(('1', 'a'), db.select_one('SELECT * from TEST'))

    def test_select_non_exist_table(self):
        db = SQLiteDBMS(get_temp_filename())
        self.assertRaises(NoSuchTableException, db.select, 'SELECT * from TEST')

    def test_default_db(self):
        # The shared default temp DB behaves like an explicitly-created one.
        db = get_default_temp_db_instance()
        db.create_table('TEST', set([('id', 'INT'), ('data', 'TEXT')])).result()
        db.execute('INSERT INTO TEST VALUES (1,"a")').result()

        self.assertIn(('1', 'a'), db.select('SELECT * from TEST'))
        self.assertEqual(('1', 'a'), db.select_one('SELECT * from TEST'))

    def test_simple_db_with_pk(self):
        db = SQLiteDBMS(get_temp_filename())
        fr = db.create_table('TEST', [('id', 'INT'), ('data', 'TEXT')], ['id'])
        fr.result()

        self.assertEqual([], db.select('SELECT * from TEST'))

    def test_drop_table(self):
        db = SQLiteDBMS(get_temp_filename())
        fr = db.create_table('TEST', [('id', 'INT'), ('data', 'TEXT')], ['id'])
        fr.result()

        db.drop_table('TEST').result()
        # Dropping a table that no longer exists must raise.
        self.assertRaises(DBException, db.drop_table('TEST').result)

    def test_simple_db_with_index(self):
        db = SQLiteDBMS(get_temp_filename())
        fr = db.create_table('TEST', [('id', 'INT'), ('data', 'TEXT')], ['id'])
        fr.result()

        db.create_index('TEST', ['data']).result()
        # Creating the same index twice must raise.
        self.assertRaises(DBException,
                          db.create_index('TEST', ['data']).result)

    def test_table_exists(self):
        db = SQLiteDBMS(get_temp_filename())
        self.assertFalse(db.table_exists('TEST'))

        db = SQLiteDBMS(get_temp_filename())
        db.create_table('TEST', [('id', 'INT'), ('data', 'TEXT')], ['id'])
        self.assertTrue(db.table_exists('TEST'))

    def test_close_twice(self):
        # Closing an already-closed DB must be a harmless no-op.
        db = SQLiteDBMS(get_temp_filename())
        db.close()
        db.close()
class TestDefaultDB(unittest.TestCase):
    def test_get_default_temp_db_instance(self):
        """The default temp DB accessor must always hand back the same object."""
        first = get_default_temp_db_instance()
        second = get_default_temp_db_instance()
        self.assertEqual(id(first), id(second))
|
[
"everping@outlook.com"
] |
everping@outlook.com
|
bd5007f5ea485e97bda7e458055eb62fbd663a8a
|
8520c991dc543f5f4e1efe59ab401824173bb985
|
/457-circular-array-loop/solution.py
|
e31ba44f932755442e14be18a59b1756d31061c2
|
[] |
no_license
|
katryo/leetcode
|
d44f70f2853c4f5ea9a462d022feb0f5436c2236
|
0da45559271d3dba687858b8945b3e361ecc813c
|
refs/heads/master
| 2020-03-24T12:04:53.859047
| 2020-02-18T04:27:55
| 2020-02-18T04:27:55
| 142,703,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,668
|
py
|
class Solution(object):
    def circularArrayLoop(self, nums):
        """
        :type nums: List[int]
        :rtype: bool

        Floyd slow/fast-pointer cycle detection, restarted from each index;
        every visited chain is zeroed out afterwards so it is never
        re-walked. NOTE: mutates nums in place (as the original did).

        Fixes vs. original: removed the unused local ``npfast`` (dead code)
        and renamed the inner helper, which shadowed the builtin ``next``.
        """
        def advance(idx):
            # Follow the jump stored at idx, wrapping circularly.
            return (idx + nums[idx]) % len(nums)

        for i in range(len(nums)):
            if nums[i] == 0:
                continue  # already proven not to be part of a valid loop

            pslow = i
            pfast = advance(pslow)

            # Keep walking while every step moves in the same direction
            # (same sign) as nums[i].
            while nums[i] * nums[pfast] > 0 and nums[i] * nums[advance(pfast)] > 0:
                if pfast == pslow:
                    # A one-element cycle does not count as a loop.
                    if advance(pslow) == pslow:
                        break
                    return True
                pfast = advance(advance(pfast))
                pslow = advance(pslow)

            # Mark the whole chain starting at i as dead (stops at the
            # first already-zeroed index).
            j = i
            while nums[j] != 0:
                nums[j] = 0
                j = advance(j)
        return False
# You are given an array of positive and negative integers.
# If a number n at an index is positive, then move forward n steps.
# Conversely, if it's negative (-n), move backward n steps.
# Assume the first element of the array is forward next to the last element,
# and the last element is backward next to the first element. Determine if there is a loop in this array.
# A loop starts and ends at a particular index with more than 1 element along the loop.
# The loop must be "forward" or "backward'.
if __name__ == '__main__':
    # Print the result for each sample input, one per line.
    solver = Solution()
    samples = [
        [3, 1, 2],
        [-1],
        [2, -1, 1, -2, -2],
        [-2, 1, -1, -2, -2],
        [2, -1, 1, 2, 2],
        [-1, 2],
    ]
    for sample in samples:
        print(solver.circularArrayLoop(sample))
|
[
"katoryo55@gmail.com"
] |
katoryo55@gmail.com
|
b728b7a1c74922c4b5ecc77fd20377d3924e6d66
|
0821d92db624dada6bc50887f6e435ef1e1c03e2
|
/norm/common.py
|
f8233b282b34e20c3f2abe8c3bf385be4388f6bb
|
[
"MIT"
] |
permissive
|
jcollie/norm
|
a29a3052705e805ba240232aec1fd6aac59897ba
|
db303b28e4184cae08228d92868f9409c013096a
|
refs/heads/master
| 2021-01-18T04:19:45.679791
| 2013-07-22T22:34:09
| 2013-07-22T22:34:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,735
|
py
|
# Copyright (c) Matt Haggard.
# See LICENSE for details.
from zope.interface import implements
from twisted.internet import defer
from collections import deque, defaultdict
from norm.interface import IAsyncCursor, IRunner, IPool
class BlockingCursor(object):
    """
    I wrap a single DB-API2 db cursor in an asynchronous api.
    """
    implements(IAsyncCursor)

    def __init__(self, cursor):
        # The wrapped synchronous DB-API2 cursor.
        self.cursor = cursor

    def execute(self, sql, params=()):
        # Run the statement; the Deferred fires with execute()'s result.
        return defer.maybeDeferred(self.cursor.execute, sql, params)

    def fetchone(self):
        # Deferred firing with the next row of the last query.
        return defer.maybeDeferred(self.cursor.fetchone)

    def fetchall(self):
        # Deferred firing with all remaining rows of the last query.
        return defer.maybeDeferred(self.cursor.fetchall)

    def lastRowId(self):
        # Already-fired Deferred with the cursor's lastrowid.
        return defer.succeed(self.cursor.lastrowid)

    def close(self):
        # Deferred firing once the underlying cursor is closed.
        return defer.maybeDeferred(self.cursor.close)
class BlockingRunner(object):
    """
    I wrap a single DB-API2 db connection in an asynchronous api.
    """
    implements(IRunner)

    cursorFactory = BlockingCursor

    def __init__(self, conn):
        """
        @param conn: A synchronous database connection.
        """
        self.conn = conn

    def runQuery(self, qry, params=()):
        """Run a query; the Deferred fires with the fetched rows."""
        return self.runInteraction(self._runQuery, qry, params)

    def _runQuery(self, cursor, qry, params):
        deferred = cursor.execute(qry, params)
        return deferred.addCallback(lambda _: cursor.fetchall())

    def runOperation(self, qry, params=()):
        """Run a statement whose result rows are not needed."""
        return self.runInteraction(self._runOperation, qry, params)

    def _runOperation(self, cursor, qry, params):
        return cursor.execute(qry, params)

    def runInteraction(self, function, *args, **kwargs):
        """Call *function* with a fresh async cursor; commit on success,
        roll back on failure, passing the outcome through either way."""
        cursor = self.cursorFactory(self.conn.cursor())
        interaction = defer.maybeDeferred(function, cursor, *args, **kwargs)
        interaction.addCallback(self._commit)
        interaction.addErrback(self._rollback)
        return interaction

    def _commit(self, result):
        self.conn.commit()
        return result

    def _rollback(self, result):
        self.conn.rollback()
        return result

    def close(self):
        """Close the wrapped connection, as a Deferred."""
        return defer.maybeDeferred(self.conn.close)
class ConnectionPool(object):
    """Dispatch queries and interactions over a pool of runners."""

    implements(IRunner)

    db_scheme = None

    def __init__(self, pool=None):
        self.pool = pool or NextAvailablePool()

    def add(self, conn):
        """Add another runner to the pool."""
        self.pool.add(conn)

    def runInteraction(self, function, *args, **kwargs):
        return self._runWithConn('runInteraction', function, *args, **kwargs)

    def runQuery(self, *args, **kwargs):
        return self._runWithConn('runQuery', *args, **kwargs)

    def runOperation(self, *args, **kwargs):
        return self._runWithConn('runOperation', *args, **kwargs)

    def _finish(self, result, conn):
        # Hand the connection back to the pool, then pass the result through.
        self.pool.done(conn)
        return result

    def _runWithConn(self, name, *args, **kwargs):
        acquired = self.pool.get()
        return acquired.addCallback(self._startRunWithConn, name, *args, **kwargs)

    def _startRunWithConn(self, conn, name, *args, **kwargs):
        method = getattr(conn, name)
        return method(*args, **kwargs).addBoth(self._finish, conn)

    def close(self):
        """Close every runner owned by the pool; fires when all are done."""
        pending = [defer.maybeDeferred(conn.close) for conn in self.pool.list()]
        return defer.gatherResults(pending)
class NextAvailablePool(object):
    """
    I give you the next available object in the pool.
    """
    implements(IPool)

    def __init__(self):
        self._options = deque()        # currently idle options
        self._all_options = []         # every option owned by the pool
        self._pending = deque()        # Deferreds waiting for an option
        self._pending_removal = defaultdict(lambda:[])  # option -> waiters for its removal

    def add(self, option):
        """Add *option* to the pool, fulfilling a queued waiter if any."""
        self._options.append(option)
        self._all_options.append(option)
        self._fulfillNextPending()

    def remove(self, option):
        """
        Remove *option* from the pool.  If it is currently checked out, the
        returned Deferred fires only once it has been handed back via done().
        """
        try:
            self._options.remove(option)
            self._all_options.remove(option)
            return defer.succeed(option)
        except ValueError:
            d = defer.Deferred()
            self._pending_removal[option].append(d)
            return d

    def get(self):
        """Return a Deferred that fires with the next idle option."""
        d = defer.Deferred()
        self._pending.append(d)
        self._fulfillNextPending()
        return d

    def _fulfillNextPending(self):
        # Pair the oldest waiter with the oldest idle option, if both exist.
        if self._pending and self._options:
            self._pending.popleft().callback(self._options.popleft())

    def done(self, option):
        """Hand *option* back to the pool after use."""
        if option in self._pending_removal:
            dlist = self._pending_removal.pop(option)
            # BUG FIX: this used map() purely for its side effect; map() is
            # lazy on Python 3, so the removal callbacks never ran.  An
            # explicit loop behaves identically on Python 2 as well.
            for d in dlist:
                d.callback(option)
            # NOTE(review): on this path the option stays in
            # self._all_options -- confirm whether close()/list() should
            # still see it after a deferred removal.
            return
        self._options.append(option)
        self._fulfillNextPending()

    def list(self):
        """Every option owned by the pool (idle or checked out)."""
        return self._all_options
|
[
"haggardii@gmail.com"
] |
haggardii@gmail.com
|
53f105e9a16c218d5698c35ab3d888d4d9d69c58
|
9baa9f1bedf7bc973f26ab37c9b3046824b80ca7
|
/venv-bck/bin/easy_install
|
f306fde1138489c4a226dd5e0a062fb6a8fad8e7
|
[] |
no_license
|
shakthydoss/suriyan
|
58774fc5de1de0a9f9975c2ee3a98900e0a5dff4
|
8e39eb2e65cc6c6551fc165b422b46d598cc54b8
|
refs/heads/master
| 2020-04-12T05:36:59.957153
| 2017-01-08T06:12:13
| 2017-01-08T06:12:13
| 59,631,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
#!/Users/saksekar/suriyan/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated setuptools console-script shim for easy_install.
import sys
import re
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip the "-script.pyw" / ".exe" suffix Windows launchers append to
    # argv[0] so the tool reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"shakthydoss@gmail.com"
] |
shakthydoss@gmail.com
|
|
311069543284b2bc146f63a4419a6b1c1c2286b8
|
08607218396a0269a90e8b4e6d099a5e99e39a8b
|
/database/schemes/easyTest/script/testCase/U商城项目/U商城管理端/站点设置/友情链接/worm_1482819508/友情链接.py
|
a743fe0cfcbaa179d5cb2864b7ab079e770d7400
|
[
"MIT"
] |
permissive
|
TonnaMajesty/test
|
4a07297557669f98eeb9f94b177a02a4af6f1af0
|
68b24d1f3e8b4d6154c9d896a7fa3e2f99b49a6f
|
refs/heads/master
| 2021-01-19T22:52:18.309061
| 2017-03-06T10:51:05
| 2017-03-06T10:51:05
| 83,779,681
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,227
|
py
|
# coding=utf-8
from time import sleep, time
from SRC.common.decorator import codeException_dec
from SRC.unittest.case import TestCase
from script.common import utils
class EasyCase(TestCase):
    """Generated UI test driving the "Friendly Links" page (add / edit / delete)."""

    def __init__(self, webDriver, paramsList):
        # Do not modify this method.
        super(EasyCase, self).__init__(webDriver, paramsList)

    @codeException_dec('3')
    def runTest(self):
        driver = self.getDriver()
        param = self.param
        tool = utils
        '''
        ##################################################################
        浏览器驱动:driver
        例如:
        driver.get('http://www.demo.com')
        driver.find_element_by_id("kw","输入框").send_keys("Remote")
        driver.find_elements_by_id("su","查找")[0].click()
        参数化:param
        说明:
        需要进行参数化的数据,用param.id 替换,id为参数化配置文件中的id值
        自定义工具模块:tool 文件所在路径script/common/utils.py
        开发人员可根据需要自行添加新的函数
        例如:
        获取一个随机生成的字符串:number=tool.randomStr(6)
        ##################################################################
        该方法内进行测试用例的编写
        '''
        # driver.find_element_by_xpath('/html/body/div[1]/div/div[1]/ul[7]/li[1]/upmark').click(); # click "Site Settings"
        driver.find_element_by_xpath('/html/body/div[1]/div/div[1]/ul[7]/li[10]/a').click() # click "Friendly Links"
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[2]/div/a').click() # click "Add"
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[3]/div[1]/div[1]/div/input').send_keys(u'你想去哪?') # enter the link name
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[3]/div[1]/div[2]/div/input').send_keys('demo.upmall.yonyouup.com') # enter the link URL
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[3]/div[1]/div[5]/div/a').click() # click "Upload"
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[3]/div[1]/div[5]/div/input').send_keys('E:\\tupian\\hhhhhh.jpg') # upload the image
        #os.system("E:\\pythonScript\\autoit\\guanbi.au4.exe") # invoke guanbi.exe to close the native Windows dialog
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[3]/div[2]/div/button[2]').click() # click "OK"
        #driver.find_elements_by_xpath('//a[@class="colorblue"]')[0].click(); # click "Edit"
        driver.find_element_by_css_selector("body > div.container.corp-page.ng-scope > div > div.col-xs-10.corp-content > div > div:nth-child(3) > div > table > tbody > tr:nth-child(1) > td.text-center > a:nth-child(1)").click() # click "Edit"
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[3]/div[2]/div/button[2]').click() # click "OK"
        #driver.find_elements_by_xpath('//a[@class="colorblue"]')[1].click(); # click "Delete"
        driver.find_element_by_css_selector("body > div.container.corp-page.ng-scope > div > div.col-xs-10.corp-content > div > div:nth-child(3) > div > table > tbody > tr:nth-child(1) > td.text-center > a:nth-child(2)").click() # click "Delete"
        driver.find_element_by_css_selector("body > div.modal.fade.ng-isolate-scope.in > div > div > div.modal-footer.ng-scope > button:nth-child(1)").click() # click "OK" in the confirm dialog
        sleep(3)
|
[
"1367441805@qq.com"
] |
1367441805@qq.com
|
49c9b831d7494a17b8b9e2e2a8847fe9fb7f86e6
|
f928edfc876d715159521589a22485d9de45cc89
|
/import_hourly_csv_to_mariadb_09.py
|
666d2da36d65dde7c7db69c75b28ea5fa5820375
|
[] |
no_license
|
guitar79/AirKorea_Python
|
cd06432740e0b292ca6ad3cde7144717967f5190
|
8077eaa0b6c444d575a25c7f7b992477a36c8294
|
refs/heads/master
| 2020-08-05T01:05:47.209200
| 2019-10-28T03:36:14
| 2019-10-28T03:36:14
| 212,342,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,349
|
py
|
'''
-*- coding: utf-8 -*-
Author guitar79@naver.com
'''
#import numpy as np
import os
import pymysql
from datetime import datetime
#import warning
#import time

# Wall-clock start time for the summary printed at the end.
start_time=str(datetime.now())

#mariaDB info
# NOTE(review): credentials are hard-coded in the script; consider loading
# them from a config file or environment variables.
db_host = '10.114.0.121'
db_user = 'modis'
db_pass = 'rudrlrhkgkrrh'
db_name = 'AirKorea'
tb_name = 'hourly_vc'

#base directory (hourly CSV files and the import logs live here)
drbase = '/media/guitar79/8T/RS_data/Remote_Sensing/2017RNE/airkorea/csv1/'

#db connect (local_infile=1 is required for LOAD DATA LOCAL INFILE below)
conn= pymysql.connect(host=db_host, user=db_user, password=db_pass, db=db_name,\
    charset='utf8mb4', local_infile=1, cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\
    SET time_zone = \"+00:00\";")

# Recreate both tables from scratch on every run.
cur.execute("DROP TABLE IF EXISTS `%s`;" %(tb_name))
cur.execute("DROP TABLE IF EXISTS `Obs_info`;")
cur.execute("CREATE TABLE IF NOT EXISTS `Obs_info` (\
    `Ocode` int(6) NOT NULL,\
    `Oname` varchar(12) NOT NULL,\
    `Region` varchar(20) NOT NULL,\
    `Address` varchar(500) DEFAULT NULL,\
    `Lat` float DEFAULT NULL,\
    `Lon` float DEFAULT NULL,\
    `Alt` float DEFAULT NULL,\
    `Remarks` char(255) DEFAULT NULL,\
    PRIMARY KEY (`Ocode`))\
    ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;")
cur.execute("CREATE TABLE IF NOT EXISTS `%s` (\
    `Region` varchar(20) DEFAULT NULL,\
    `Ocode` int(6) NOT NULL,\
    `Oname` varchar(12) DEFAULT NULL,\
    `Otime` int(12) NOT NULL,\
    `SO2` float DEFAULT NULL,\
    `CO` float DEFAULT NULL,\
    `O3` float DEFAULT NULL,\
    `NO2` float DEFAULT NULL,\
    `PM10` int(4) DEFAULT NULL,\
    `PM25` int(4) DEFAULT NULL,\
    `Address` varchar(200) DEFAULT NULL,\
    `id` int(11) NOT NULL AUTO_INCREMENT PRIMARY KEY,\
    CONSTRAINT FK_Ocode FOREIGN KEY (`Ocode`) REFERENCES Obs_info(`Ocode`)\
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;"\
    %(tb_name))
'''
cur.execute("CREATE TABLE IF NOT EXISTS `%s` (\
    `Ocode` int(6) NOT NULL,\
    `Otime` int(12) NOT NULL,\
    `SO2` float DEFAULT NULL,\
    `CO` float DEFAULT NULL,\
    `O3` float DEFAULT NULL,\
    `NO2` float DEFAULT NULL,\
    `PM10` int(4) DEFAULT NULL,\
    `PM25` int(4) DEFAULT NULL,\
    `id` int(11) NOT NULL AUTO_INCREMENT,\
    PRIMARY KEY (`id`),\
    CONSTRAINT FK_Ocode FOREIGN KEY (`Ocode`) REFERENCES Obs_info(`Ocode`)\
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;"\
    %(tb_name))
'''
cur.execute("ALTER TABLE `%s`\
    MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;" %(tb_name))

#delete all data in the table
print("TRUNCATE TABLE %s;" %(tb_name))
cur.execute("TRUNCATE TABLE %s;" %(tb_name))
conn.commit()

#log file (one for successes, one for failures)
insert_log = open(drbase+'hourly_import_result.log', 'a')
error_log = open(drbase+'hourly_import_error.log', 'a')

# Import every CSV file, newest name first.
for i in sorted(os.listdir(drbase),reverse=True):
    #read csv files
    if i[-4:] == '.csv':
        print(i)
        try :
            print("LOAD DATA LOCAL \
                INFILE '%s%s' \
                INTO TABLE %s.%s \
                FIELDS TERMINATED BY '\|' \
                ENCLOSED BY '\"' \
                LINES TERMINATED BY '\\n'\
                IGNORE 1 LINES \
                (`Region`, `Ocode`, `Oname`, `Otime`, \
                `SO2`, `CO`, `O3`, `NO2`, `PM10`, `PM25`, `Address`);"\
                %(drbase,i,db_name,tb_name))
            cur.execute("LOAD DATA LOCAL \
                INFILE '%s%s' \
                INTO TABLE %s.%s \
                FIELDS TERMINATED BY '\|' \
                ENCLOSED BY '\"' \
                LINES TERMINATED BY '\\n'\
                IGNORE 1 LINES \
                (`Region`, `Ocode`, `Oname`, `Otime`, \
                `SO2`, `CO`, `O3`, `NO2`, `PM10`, `PM25`, `Address`);"\
                %(drbase,i,db_name,tb_name))
            conn.commit()
            insert_log.write(drbase+i+" is inserted to the %s - %s\n"\
                %(tb_name, datetime.now()))
        # BUG FIX: a bare "except :" also swallows SystemExit and
        # KeyboardInterrupt; catch Exception so the per-file best-effort
        # logging is kept without hiding interpreter-level exits.
        except Exception:
            print(drbase+i+" is error : %s - %s\n"\
                %(tb_name, datetime.now()))
            error_log.write(drbase+i+" is error : %s - %s\n"\
                %(tb_name, datetime.now()))
insert_log.close()
error_log.close()

# Post-import table maintenance.
print("CHECK TABLE %s.%s;" %(db_name, tb_name))
cur.execute("CHECK TABLE %s.%s;" %(db_name, tb_name))
conn.commit()
print("ALTER TABLE %s.%s ENGINE = InnoDB;" %(db_name, tb_name))
cur.execute("ALTER TABLE %s.%s ENGINE = InnoDB;" %(db_name, tb_name))
conn.commit()
print("OPTIMIZE TABLE %s.%s;" %(db_name, tb_name))
cur.execute("OPTIMIZE TABLE %s.%s;" %(db_name, tb_name))
conn.commit()
'''
print("FLUSH TABLE %s.%s;" %(db_name, tb_name))
cur.execute("FLUSH TABLE %s.%s;" %(db_name, tb_name))
conn.commit()
'''
cur.close()
# BUG FIX: the connection itself was never closed (only the cursor).
conn.close()
end_time = str(datetime.now())
print("start : "+ start_time+" end: "+end_time)
'''
http://localhost/phpMyAdmin/sql.php?db=AirKorea&table=houly_vc&back=tbl_operations.php&goto=tbl_operations.php&sql_query=ALTER+TABLE+%60houly_vc%60+ENGINE+%3D+InnoDB%3B&token=746c2350251eec3ab8bef717286d7272
'''
|
[
"noreply@github.com"
] |
guitar79.noreply@github.com
|
6d5d2be5a463e58fc1862feabe2bcc443fce727b
|
f07391f481150ad07cd5652a7b09cf1cd60d345f
|
/cmsplugin_container/cms_plugins.py
|
ee489a8a5b52902cedc985968117762177b4c1a3
|
[] |
no_license
|
django-cms-plugins/django-cmsplugin-container
|
39dc956d1b7aa29132c0c841aa1d187da779e568
|
c35d7111a6bd2c73de3d5df6a673497214df8e76
|
refs/heads/master
| 2021-01-21T15:07:12.658207
| 2013-07-23T14:56:19
| 2013-07-23T14:56:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,735
|
py
|
#-*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from cms.models import CMSPlugin
from cmsplugin_container.models import Container, Grid
from cmsplugin_container.forms import ContainerForm
class ContainerPlugin(CMSPluginBase):
    """CMS plugin that renders a container and creates its column children."""
    model = Container
    module = _("C")
    name = _("Multi Columns")
    render_template = "cms/plugins/container.html"
    allow_children = True
    child_classes = ["ColumnPlugin"]
    form = ContainerForm

    def render(self, context, instance, placeholder):
        # Expose the plugin instance and placeholder to the template.
        context.update({
            'instance': instance,
            'placeholder':placeholder,
        })
        return context

    def save_model(self, request, obj, form, change):
        # BUG FIX: super() previously referenced MultiColumnPlugin, a name
        # that does not exist in this module, so saving raised NameError.
        response = super(ContainerPlugin, self).save_model(request, obj, form, change)
        # Create the requested number of child columns.
        # NOTE(review): `Column` and `ColumnPlugin` are referenced here but
        # only Container and Grid are imported above -- confirm the intended
        # model/import.
        for x in xrange(int(form.cleaned_data['create'])):
            col = Column(parent=obj, placeholder=obj.placeholder, language=obj.language, width=form.cleaned_data['create_width'], position=CMSPlugin.objects.filter(parent=obj).count(), plugin_type=ColumnPlugin.__name__)
            col.save()
        return response
class ColumnPlugin(CMSPluginBase):
    """CMS plugin rendering a single column inside the container plugin."""
    # NOTE(review): `Column` is not imported in this module (the models
    # import brings in Container and Grid only), so this raises NameError at
    # import time -- confirm which model is intended.
    model = Column
    module = _("Multi Columns")
    name = _("Column")
    render_template = "cms/plugins/column.html"
    #frontend_edit_template = 'cms/plugins/column_edit.html'
    allow_children = True

    def render(self, context, instance, placeholder):
        # Expose the plugin instance and placeholder to the template.
        context.update({
            'instance': instance,
            'placeholder':placeholder,
        })
        return context
# BUG FIX: MultiColumnPlugin is not defined anywhere in this module; the
# container plugin class is named ContainerPlugin.
plugin_pool.register_plugin(ContainerPlugin)
plugin_pool.register_plugin(ColumnPlugin)
|
[
"jacob.rief@gmail.com"
] |
jacob.rief@gmail.com
|
ae1e8444b7e83511169be63c369f1ce2d53da1bd
|
f9462f3768fa058bd895a56b151da694664ce588
|
/examples/713_no-op.py
|
1a3dcf7281703f179d38d40bd7d138b5afd82c90
|
[
"MIT"
] |
permissive
|
ryanpennings/workshop_swinburne_2021
|
16a9a7e2c7134832f8f714b7b430376f1b67dfb2
|
820ef4e36e73ac950f40e1846739087180af2e1c
|
refs/heads/main
| 2023-05-31T16:35:16.535310
| 2021-06-17T06:24:51
| 2021-06-17T06:24:51
| 377,373,107
| 0
| 0
|
MIT
| 2021-06-17T06:24:51
| 2021-06-16T04:45:02
| null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
import compas_rrc as rrc
if __name__ == '__main__':
    # Bring up the ROS connection.
    ros_client = rrc.RosClient()
    ros_client.run()

    # Attach a client for the configured robot.
    abb_client = rrc.AbbClient(ros_client, '/rob1')
    print('Connected.')

    # Send a no-op instruction and wait for the controller's acknowledgement.
    feedback = abb_client.send_and_wait(rrc.Noop())
    print('Feedback = ', feedback)

    print('Finished')

    # Shut the connection down cleanly.
    ros_client.close()
    ros_client.terminate()
|
[
"casas@arch.ethz.ch"
] |
casas@arch.ethz.ch
|
eb5c2010387158948bc1e2996332dbd8a800d330
|
17bdb906c2c0886d6451b55ac84633d416d5c50a
|
/chapter_one/test_list.py
|
28308ca52a00ce387a5426c39769e05cde52ba57
|
[
"MIT"
] |
permissive
|
vyahello/unittest-bootcamp
|
10053994dc834720b76df90a37d4756a6f1437c7
|
af24c5c00032ab7265a0c00da5955a26d25dff33
|
refs/heads/master
| 2021-07-17T03:42:30.058662
| 2020-05-09T22:21:17
| 2020-05-09T22:21:17
| 139,250,120
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
from typing import List
from unittest import TestCase
class TestListMethods(TestCase):
    """Exercises the basic mutation methods of the built-in list type."""

    def test_append(self) -> None:
        """append() adds each element to the end of the list."""
        collected: List[int] = []
        for value in (1, 2, 3):
            collected.append(value)
        self.assertEqual(collected, [1, 2, 3])

    def test_extend(self) -> None:
        """extend() appends every element of an iterable."""
        values: List[int] = [1, 2, 3]
        values.extend(range(4, 6))
        self.assertEqual(values[-2:], [4, 5])

    def test_insert(self) -> None:
        """insert() places an element at the given index."""
        values: List[int] = [1, 2, 3]
        values.insert(3, 4)
        self.assertEqual(values, [1, 2, 3, 4])

    def test_pop(self) -> None:
        """pop() removes the element at the given index."""
        values: List[int] = [1, 2, 3]
        values.pop(1)
        self.assertEqual(values, [1, 3])
|
[
"vyahello@gmail.com"
] |
vyahello@gmail.com
|
86047464007e688dadd3b3c27012b467b686a566
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/framework/api/optimizer/reader.py
|
ebbceb27d15008ded7a2c8cd080b7547fb67cd48
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903
| 2023-09-04T11:17:50
| 2023-09-04T11:17:50
| 383,138,186
| 42
| 312
| null | 2023-09-13T11:13:35
| 2021-07-05T12:44:59
|
Python
|
UTF-8
|
Python
| false
| false
| 222
|
py
|
#!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
reader
"""
import numpy as np
# Random test inputs: a (1, 1, 10) array and a (1, 3, 16, 16)
# image-shaped array, values uniform in [0, 1).
reader = np.random.random(size=[1, 1, 10])
reader_img = np.random.random(size=[1, 3, 16, 16])
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
5679e7a79c5e5694fc959140e9c696630b307830
|
2a6dbece45c391e6dc3d28f04c7c02b18d17b24b
|
/myapi/views.py
|
e7798b0b48cf6abbd44a3179bd4bbeac4e5ba3e6
|
[] |
no_license
|
KassadReda/Rest_API_Blender
|
3b1e43b2a488541d8d8a9aa9f95a39c4e70c34ae
|
ee9e5216462902a5cfee98a5502b4580b3af12e6
|
refs/heads/main
| 2023-04-17T06:35:49.204734
| 2021-05-03T22:45:52
| 2021-05-03T22:45:52
| 364,080,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
"""
class BlenderViewSet
this class define how to display a model.
by Reda
"""
# coding=utf-8
from django.shortcuts import render
from rest_framework import viewsets
from .serializers import BlenderModelSerializer
from .models import BlenderModel
# Create your views here.
class BlenderViewSet(viewsets.ModelViewSet) :
    # All Blender models, ordered alphabetically by name.
    queryset = BlenderModel.objects.all().order_by('name')
    serializer_class = BlenderModelSerializer
    #serialise the uploaded file
    def file(self, request,pk=None) :
        blenderModel= self.get_object()
        file = blenderModel.file
        serializer = BlenderModelSerializer(file, data=request.data)
        # NOTE(review): the serializer is built but never validated, saved,
        # or returned -- this action currently responds with None; confirm
        # the intended behavior.
|
[
"="
] |
=
|
d7e35795109593422c728043090178b3c899e3ec
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/65/usersdata/159/31031/submittedfiles/investimento.py
|
5b61ed3d81f3cbef16ed56e9d8cb401d0f95499a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division

# Read the initial investment and the growth rate per period.
a = float(input('Valor do investimento'))
x = float(input(''))

# BUG FIX: the original unrolled code printed the 3rd compounded value a
# second time ("print('%.2f' %d)" where the 4th value `e` was intended).
# Compounding in a loop removes the copy/paste hazard entirely: print the
# accumulated value after each of the 10 periods.
valor = a
for _ in range(10):
    valor = valor + (x * valor)
    print('%.2f' % valor)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
4062480923890be48ce91948af01567a73be9bed
|
6573a45c4389688666821621c26a5a95a0765e4d
|
/archived_envs/20190625_100746/bin/google-oauthlib-tool
|
ed107688f59c3ccd9c6f360932ed99b926c0aff3
|
[] |
no_license
|
ilhmndn/Warehouse-Management-System-With-Frappe
|
66a41be2286dbdb556ab51a4788fc42987d6ed2e
|
bd9864c5a04a6e2f2f625a8755fba3df4b6409be
|
refs/heads/master
| 2022-10-23T11:13:57.810948
| 2019-07-02T05:18:19
| 2019-07-02T05:18:19
| 194,467,571
| 2
| 2
| null | 2022-10-15T16:16:10
| 2019-06-30T02:40:05
|
Python
|
UTF-8
|
Python
| false
| false
| 264
|
#!/home/ilhmndn/frappe-training/env/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for google-oauthlib-tool.
import re
import sys
from google_auth_oauthlib.tool.__main__ import main
if __name__ == '__main__':
    # Strip the "-script.pyw" / ".exe" suffix Windows launchers append to
    # argv[0] before delegating to the real entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"ilhmndn@localhost.localdomain"
] |
ilhmndn@localhost.localdomain
|
|
c82d3a37fc944204f5db277b2c98979ab8efef44
|
76d4430567b68151df1855f45ea4408f9bebe025
|
/test/functional/test_framework/coverage.py
|
7f4c1c66546c66896b4314f57e91dcf935f48336
|
[
"MIT"
] |
permissive
|
MicroBitcoinOrg/MicroBitcoin
|
f761b2ff04bdcb650d7c0ddbef431ef95cd69541
|
db7911968445606bf8899903322d5d818d393d88
|
refs/heads/master
| 2022-12-27T10:04:21.040945
| 2022-12-18T05:05:17
| 2022-12-18T05:05:17
| 132,959,214
| 21
| 33
|
MIT
| 2020-06-12T04:38:45
| 2018-05-10T22:07:51
|
C++
|
UTF-8
|
Python
| false
| false
| 3,386
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The MicroBitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
Provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
    """
    An object that wraps AuthServiceProxy to record specific RPC calls.
    """

    def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
        """
        Kwargs:
            auth_service_proxy_instance (AuthServiceProxy): the instance
                being wrapped.
            coverage_logfile (str): if specified, write each service_name
                out to a file when called.
        """
        self.auth_service_proxy_instance = auth_service_proxy_instance
        self.coverage_logfile = coverage_logfile

    def _log_call(self):
        # Append the RPC method name, one per line, when logging is enabled.
        rpc_method = self.auth_service_proxy_instance._service_name
        if self.coverage_logfile:
            with open(self.coverage_logfile, 'a+', encoding='utf8') as logfile:
                logfile.write("%s\n" % rpc_method)

    def __getattr__(self, name):
        attr = getattr(self.auth_service_proxy_instance, name)
        if isinstance(attr, type(self.auth_service_proxy_instance)):
            # Keep wrapping nested proxies so their calls are logged too.
            return AuthServiceProxyWrapper(attr, self.coverage_logfile)
        # Anything else passes straight through unwrapped.
        return attr

    def __call__(self, *args, **kwargs):
        """
        Delegates to AuthServiceProxy, then writes the particular RPC method
        called to a file.
        """
        result = self.auth_service_proxy_instance.__call__(*args, **kwargs)
        self._log_call()
        return result

    def __truediv__(self, relative_uri):
        return AuthServiceProxyWrapper(
            self.auth_service_proxy_instance / relative_uri,
            self.coverage_logfile)

    def get_request(self, *args, **kwargs):
        self._log_call()
        return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
    """
    Get a filename unique to the test process ID and node.
    This file will contain a list of RPC commands covered.
    """
    basename = "coverage.pid%s.node%s.txt" % (str(os.getpid()), str(n_node))
    return os.path.join(dirname, basename)
def write_all_rpc_commands(dirname, node):
    """
    Write out a list of all RPC functions available in `micro-cli` for
    coverage comparison. This will only happen once per coverage
    directory.

    Args:
        dirname (str): temporary test dir
        node (AuthServiceProxy): client

    Returns:
        bool. if the RPC interface file was written.
    """
    filename = os.path.join(dirname, REFERENCE_FILENAME)
    if os.path.isfile(filename):
        # Another process already produced the reference file.
        return False
    commands = set()
    for raw_line in node.help().split('\n'):
        stripped = raw_line.strip()
        # Skip blank lines and section headers (lines starting with '=').
        if stripped and not stripped.startswith('='):
            commands.add("%s\n" % stripped.split()[0])
    with open(filename, 'w', encoding='utf8') as outfile:
        outfile.writelines(list(commands))
    return True
|
[
"iamstenman@protonmail.com"
] |
iamstenman@protonmail.com
|
19c37356466ad944b8cb042d417054ce008b1f64
|
17bdf40c2bbdf3dd09bf0fa683d471f4e07159fd
|
/asymmetric_jwt_auth/apps.py
|
be5200a58c04ac73f83aa2863dfef64592b567c1
|
[
"ISC"
] |
permissive
|
chiranthsiddappa/asymmetric_jwt_auth
|
c8c9f0a11b36994b72c87f2d834189df94ef6fee
|
a95d28ba61e38395da483243a6f536bf25a41e74
|
refs/heads/master
| 2020-12-25T17:56:18.972703
| 2016-05-24T05:16:06
| 2016-05-24T05:16:06
| 59,540,392
| 0
| 0
| null | 2016-05-24T04:30:18
| 2016-05-24T04:30:18
| null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
from django.apps import AppConfig
class JWTAuthConfig(AppConfig):
    """Django app configuration for the asymmetric_jwt_auth package."""
    # Dotted path of the application this config belongs to.
    name = 'asymmetric_jwt_auth'
    # Human-readable name shown in the Django admin.
    verbose_name = "Asymmetric Key Authentication"
|
[
"crgwbr@gmail.com"
] |
crgwbr@gmail.com
|
3018d83ac2e45b567d543161d4efa6c95141ef00
|
f45cc0049cd6c3a2b25de0e9bbc80c25c113a356
|
/LeetCode/石子游戏/5611. 石子游戏 VI.py
|
ea53be09b91192af8790730394fd8bcd26bf5197
|
[] |
no_license
|
yiming1012/MyLeetCode
|
4a387d024969bfd1cdccd4f581051a6e4104891a
|
e43ee86c5a8cdb808da09b4b6138e10275abadb5
|
refs/heads/master
| 2023-06-17T06:43:13.854862
| 2021-07-15T08:54:07
| 2021-07-15T08:54:07
| 261,663,876
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,385
|
py
|
"""
5611. 石子游戏 VI
Alice 和 Bob 轮流玩一个游戏,Alice 先手。
一堆石子里总共有 n 个石子,轮到某个玩家时,他可以 移出 一个石子并得到这个石子的价值。Alice 和 Bob 对石子价值有 不一样的的评判标准 。
给你两个长度为 n 的整数数组 aliceValues 和 bobValues 。aliceValues[i] 和 bobValues[i] 分别表示 Alice 和 Bob 认为第 i 个石子的价值。
所有石子都被取完后,得分较高的人为胜者。如果两个玩家得分相同,那么为平局。两位玩家都会采用 最优策略 进行游戏。
请你推断游戏的结果,用如下的方式表示:
如果 Alice 赢,返回 1 。
如果 Bob 赢,返回 -1 。
如果游戏平局,返回 0 。
示例 1:
输入:aliceValues = [1,3], bobValues = [2,1]
输出:1
解释:
如果 Alice 拿石子 1 (下标从 0开始),那么 Alice 可以得到 3 分。
Bob 只能选择石子 0 ,得到 2 分。
Alice 获胜。
示例 2:
输入:aliceValues = [1,2], bobValues = [3,1]
输出:0
解释:
Alice 拿石子 0 , Bob 拿石子 1 ,他们得分都为 1 分。
打平。
示例 3:
输入:aliceValues = [2,4,3], bobValues = [1,6,7]
输出:-1
解释:
不管 Alice 怎么操作,Bob 都可以得到比 Alice 更高的得分。
比方说,Alice 拿石子 1 ,Bob 拿石子 2 , Alice 拿石子 0 ,Alice 会得到 6 分而 Bob 得分为 7 分。
Bob 会获胜。
提示:
n == aliceValues.length == bobValues.length
1 <= n <= 105
1 <= aliceValues[i], bobValues[i] <= 100
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/stone-game-vi
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from typing import List
"""
贪心做法:
证明:
假设只有两个石头,对于 a, b 的价值分别是 a1, a2, b1, b2
第一种方案是A取第一个,B取第二个,A与B的价值差是 c1 = a1 - b2
第二种方案是A取第二个,B取第一个,A与B的价值差是 c2 = a2 - b1
那么这两种方案对于A来说哪一种更优,就取决于两个方案的价值差的比较
记 c = c1 - c2 = (a1 - b2) - (a2 - b1) = (a1 + b1) - (a2 + b2)
如果c > 0 那么方案一更优,如果c == 0,那么两种方案价值一样,如果c < 0那么方案二更优
那么比较两个方案的优劣 == 比较 a1 + b1 与 a2 + b2 的优劣 ,
归纳一下就是比较每个下标 i 的 a[i] + b[i] 的优劣
所以贪心的策略:将两组石头的价值合并,每次取价值最大的那一组。
写法:先将两个数组的价值合并,并用下标去标记
对价值排序,A取偶数下标,B取奇数下标,最后比较A,B的价值总和
"""
class Solution:
    def stoneGameVI(self, a: List[int], b: List[int]) -> int:
        """
        Greedy: sort stones by combined value a[i] + b[i] descending;
        Alice takes the even turns, Bob the odd turns.

        Returns 1 for an Alice win, -1 for a Bob win, 0 for a draw.
        """
        combined = sorted(zip(a, b), key=lambda pair: pair[0] + pair[1], reverse=True)
        # Alice scores her own valuation on turns 0, 2, 4, ...;
        # Bob scores his valuation on turns 1, 3, 5, ...
        alice_total = sum(pair[0] for pair in combined[0::2])
        bob_total = sum(pair[1] for pair in combined[1::2])
        if alice_total > bob_total:
            return 1
        if alice_total < bob_total:
            return -1
        return 0
if __name__ == '__main__':
    # Run the first sample case from the problem statement.
    alice = [1, 3]
    bob = [2, 1]
    print(Solution().stoneGameVI(alice, bob))
|
[
"1129079384@qq.com"
] |
1129079384@qq.com
|
58c456b9e168ba17eb5cc5d6e3bc8715df702e0d
|
f4dd8aa4e5476ffde24e27273dd47913c7f9177a
|
/Dlv2_safe2/tests/parser/edbidb.2.test.py
|
7ca82d647d28036317512550cf746da486a374b1
|
[
"Apache-2.0"
] |
permissive
|
dave90/Dlv_safe2
|
e56071ec1b07c45defda571cb721852e2391abfb
|
f127f413e3f35d599554e64aaa918bc1629985bc
|
refs/heads/master
| 2020-05-30T10:44:13.473537
| 2015-07-12T12:35:22
| 2015-07-12T12:35:22
| 38,256,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
# Datalog program fed to the parser under test.
input = """
g(3,4).
g(4,1).
h(X,Y):- h(Y,X).
g(X,Y):- h(X,Z), g(Z,Y).
h(3,4).
g(5,2).
"""
# Expected parser output -- identical to the input for this case.
output = """
g(3,4).
g(4,1).
h(X,Y):- h(Y,X).
g(X,Y):- h(X,Z), g(Z,Y).
h(3,4).
g(5,2).
"""
|
[
"davide@davide-All-Series"
] |
davide@davide-All-Series
|
39488c26270cabe7fb0720f02e7f86e06baa8868
|
db5264994305e8c926f89cb456f33bd3a4d64f76
|
/Sklep zielarski/orders/migrations/0001_initial.py
|
c21bd1168a45aeac59c66f7e35c2afffd875dd47
|
[] |
no_license
|
marcinpelszyk/Django
|
7842e20d5e8b213c4cd42c421c1db9ab7d5f01d5
|
aff2b9bd20e978a22a4a98994bf8424892d3c82f
|
refs/heads/main
| 2023-05-01T19:20:37.267010
| 2021-05-18T17:51:53
| 2021-05-18T17:51:53
| 356,532,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,201
|
py
|
# Generated by Django 3.1.7 on 2021-05-08 19:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Order and OrderItem tables.

    Auto-generated by Django 3.1.7; do not hand-edit field definitions —
    generate a follow-up migration instead.
    """

    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('store', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(max_length=50)),
                ('address1', models.CharField(max_length=250)),
                ('address2', models.CharField(max_length=250)),
                ('city', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=100)),
                ('post_code', models.CharField(max_length=20)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('total_paid', models.DecimalField(decimal_places=2, max_digits=5)),
                ('order_key', models.CharField(max_length=200)),
                ('billing_status', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_user', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest orders first by default.
                'ordering': ('-created',),
            },
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.DecimalField(decimal_places=2, max_digits=5)),
                ('quantity', models.PositiveIntegerField(default=1)),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.order')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='store.product')),
            ],
        ),
    ]
|
[
"marcin.pelszyk90@gmail.com"
] |
marcin.pelszyk90@gmail.com
|
7a2d804bfeae7d288dc2c166ea105a91da40ca97
|
3cd19164c17d9793ea506369454b8bacd5cebfa9
|
/Backend/clubmg_bak_20190726/clubserver/urls.py
|
48019cb3399c4c52f203ca02e80992ee2532ec11
|
[] |
no_license
|
Myxg/BadmintonClubSystem
|
337a17728122ab929d37e7f2732850beb49d8be0
|
1c51236098ab3770cadd925212f9d3978ed83c2a
|
refs/heads/master
| 2022-12-27T10:13:55.129630
| 2020-09-16T10:21:36
| 2020-09-16T10:21:36
| 295,998,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,500
|
py
|
#coding: utf-8
from django.conf.urls import include, url
from rest_framework_jwt.views import obtain_jwt_token
from . import views
# URL routes for the badminton-club management API (DRF class-based views;
# JWT auth via rest_framework_jwt).
urlpatterns = [
    # Authentication / user management
    url(r'^token-auth$', obtain_jwt_token),
    url(r'^useradd$', views.UserAdd.as_view()),
    url(r'^user$', views.UserView.as_view()),
    url(r'^user/password$', views.UpdatePassword.as_view()),
    url(r'^user/email$', views.UpdateEmail.as_view()),
    url(r'^user/photo$', views.UpdatePhoto.as_view()),
    url(r'^user/(?P<user_id>[0-9]+)$', views.EditUserView.as_view()),
    url(r'^users$', views.UsersView.as_view()),
    # Groups and permissions
    url(r'^group/(?P<pk_id>[0-9]+)$', views.GroupView.as_view()),
    url(r'^groups$', views.GroupsView.as_view()),
    url(r'^operations$', views.OperationsView.as_view()),
    url(r'^permissions$', views.PermissionsView.as_view()),
    # Athlete records: profile, fitness data, world/olympic rankings
    url(r'^athlete/(?P<pk_id>[0-9]+)$', views.AthleteView.as_view()),
    url(r'^athletes$', views.AthletesView.as_view()),
    url(r'^athlete/company/(?P<pk_id>[0-9]+)$', views.AthleteCompanyView.as_view()),
    url(r'^athlete/companys$', views.AthleteCompanysView.as_view()),
    url(r'^athlete/sportevent/(?P<pk_id>[0-9]+)$', views.SportEventExpView.as_view()),
    url(r'^athlete/group/(?P<pk_id>[0-9]+)$', views.AthleteGroupView.as_view()),
    url(r'^athlete/groups$', views.AthleteGroupsView.as_view()),
    url(r'^athlete/fitness/items$', views.FitnessItemsView.as_view()),
    url(r'^athlete/fitness/datas$', views.FitnessDatasView.as_view()),
    url(r'^athlete/fitness/data/(?P<pk_id>[0-9]+)$', views.FitnessDataView.as_view()),
    url(r'^athlete/worldrankinglist$', views.WorldRankingListView.as_view()),
    url(r'^athlete/worldranking/(?P<pk_id>[0-9]+)$', views.WorldRankingView.as_view()),
    url(r'^athlete/olympicrankinglist$', views.OlympicRankingListView.as_view()),
    url(r'^athlete/olympicranking/(?P<pk_id>[0-9]+)$', views.OlympicRankingView.as_view()),
    url(r'^athlete/overview/(?P<pk_id>[0-9]+)$', views.AthleteOverViewView.as_view()),
    url(r'^athlete/linkdocs/(?P<pk_id>[0-9]+)$', views.AthleteDocLinkView.as_view()),
    url(r'^athlete/matchs/(?P<pk_id>[0-9]+)$', views.AthleteMatchVideosSearchView.as_view()),
    # Match videos and metadata
    url(r'^video/(?P<pk_id>[0-9]+)$', views.MatchVideoView.as_view()),
    url(r'^videos$', views.MatchVideosSearchView.as_view()),
    url(r'^matchinfo/(?P<pk_id>[0-9]+)$', views.MatchInfoView.as_view()),
    url(r'^matchinfos$', views.MatchInfosView.as_view()),
    url(r'^matchlist$', views.MatchListView.as_view()),
    url(r'^matchlevel2list$', views.MatchLevel2NameView.as_view()),
    # Match annotation data; match_id has the fixed form NNNN_NNNN_NNNN
    url(r'^markdata/matchinfos$', views.MarkMatchInfosView.as_view()),
    url(r'^markdata/show/(?P<name>(hits|scores|serverecord|playgroundrecord))/(?P<match_id>[0-9]{4}_[0-9]{4}_[0-9]{4})$', views.MarkDataShow.as_view()),
    url(r'^markdata/sync/(?P<name>(hits|scores|serverecord|playgroundrecord))/(?P<match_id>[0-9]{4}_[0-9]{4}_[0-9]{4})$', views.MarkDataSync.as_view()),
    url(r'^markdata/sync/(?P<name>(hits|scores))/(?P<match_id>[0-9]{4}_[0-9]{4}_[0-9]{4})/retry$', views.MarkDataSyncRetry.as_view()),
    # Documents; note /history/ reuses DocsView
    url(r'^docs/(?P<module_id>[a-zA-Z0-9_]+)(/)?$', views.DocsView.as_view()),
    url(r'^docs/link/(?P<module_id>[a-zA-Z0-9_]+)(/)?$', views.DocLinkView.as_view()),
    url(r'^history/(?P<module_id>[a-zA-Z0-9_]+)(/)?$', views.DocsView.as_view()),
    url(r'^companylist$', views.CompanysListView.as_view()),
    # test url
    url(r'^sn/(?P<type_id>[a-z]+)$', views.NewSN.as_view()),
    url(r'^test$', views.Test.as_view()),
]
|
[
"15234407153@163.com"
] |
15234407153@163.com
|
882dd051b7a1fff21dee017e84de337271d6bcb6
|
39329ae5773c9b4c1f9c91eec393507f5e8ae1c0
|
/server/.history/server_20200307213734.py
|
40e4aa62a922652973d3ff4b8b8636ddb900856f
|
[] |
no_license
|
dobreandrei1/legal
|
083267aae7faa10775e5a634679869fce0ac3136
|
dd05fad8df599f9fc34f56628ebd8861f7a004b4
|
refs/heads/master
| 2021-09-08T20:16:29.926214
| 2020-03-08T09:24:04
| 2020-03-08T09:24:04
| 245,785,262
| 0
| 0
| null | 2021-09-03T00:42:33
| 2020-03-08T09:22:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
from pathlib import Path
from flask import Flask, render_template, request, send_file, send_from_directory, safe_join, abort, current_app
# from werkzeug import secure_filename
import pandas as pd
import os
import time
import json
from flask_cors import CORS
from haikunator import Haikunator
import unidecode
import PyPDF2
import unidecode
haikunator = Haikunator()
app = Flask(__name__)
CORS(app)
applicationVersion = 0
@app.route('/upload')
def upload_file():
    """Render the HTML upload form (template must exist under ./templates)."""
    return render_template('upload.html')
@app.route('/api/titles', methods=['GET', 'POST'])
def get_titles():
    """Accept an uploaded contract PDF and extract its text plus the text of
    the local clause-reference PDF ('clauze.pdf').

    POST form fields: 'file' (the PDF upload) and 'filename' (base name,
    saved as <filename>.pdf in the working directory).
    """
    if request.method == 'POST':
        f = request.files['file']
        filename = request.form['filename']
        # TODO: maybe check if file already exists and not save multiple times
        # - get list of all files
        # - if filename variable is a substr of any file name in folder: compare contents
        # - if match don`t save file again but use that one
        # (The original also had a branch re-assigning `name` to the identical
        # value when the file existed — a no-op, removed.)
        name = filename + '.pdf'
        f.save(name)

        # Extract the clause reference document.
        # BUG FIX: PdfFileReader.numPages is an int; the original code did
        # `for page in pages`, which raises TypeError. Iterate range(numPages).
        # Files are now closed via context managers instead of leaking.
        with open('clauze.pdf', 'rb') as clauze_file:
            clauze_reader = PyPDF2.PdfFileReader(clauze_file)
            clauze_doc = ''.join(clauze_reader.getPage(i).extractText()
                                 for i in range(clauze_reader.numPages))

        # Extract the uploaded contract.
        with open(name, 'rb') as contract_file:
            contract_reader = PyPDF2.PdfFileReader(contract_file)
            contract_doc = ''.join(contract_reader.getPage(i).extractText()
                                   for i in range(contract_reader.numPages))

        # NOTE(review): clauze_doc / contract_doc are extracted but unused, and
        # returning the int 1 is not a valid Flask response body — kept for
        # interface compatibility; confirm the intended response format.
        return 1
if __name__ == '__main__':
    # NOTE(review): binds to all interfaces (0.0.0.0); confirm this exposure
    # is intended for the deployment environment.
    app.run(debug = False, host='0.0.0.0')
|
[
"dobreandrei1@yahoo.com"
] |
dobreandrei1@yahoo.com
|
2ff3d6352d5241a08ded28a3f45e2e30c903eee7
|
1841c29ffb26901bc7830b2ce4ea712197f1b740
|
/models/GraphNN/DTNN.py
|
e2ad09c182e6617d8bbdf55b57b5fb2b13b136e6
|
[
"MIT"
] |
permissive
|
chubbymaggie/Deep_Learning_On_Code_With_A_Graph_Vocabulary
|
756bdd54b17d351d31200cc0ceacf8f639e0c678
|
29ee2fdffc5bc05582a91025697e256980e75ef2
|
refs/heads/master
| 2020-03-28T12:33:33.820671
| 2018-09-10T22:54:14
| 2018-09-10T22:54:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,446
|
py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
from collections import OrderedDict
from mxnet import gluon
from models.FITB.FITBModel import FITBModel
from models.GraphNN.MPNN import MPNN
class DTNN(MPNN):
    '''
    Deep Tensor Neural Network from https://www.nature.com/articles/ncomms13890

    Message-passing variant where each edge type contributes a learned
    elementwise weighting; hidden states are updated additively.
    '''
    def __init__(self, **kwargs):
        # kwargs must include 'hidden_size'; remaining kwargs are consumed by MPNN.
        super().__init__(**kwargs)
        self.hidden_size = kwargs['hidden_size']
        # Initializing model components
        with self.name_scope():
            # Dense applied to hidden states before message computation.
            self.hidden_message_dense = gluon.nn.Dense(self.hidden_size, in_units=self.hidden_size)
            # Dense applied after combining hidden state with the edge weighting.
            self.hidden_and_edge_dense = gluon.nn.Dense(self.hidden_size, in_units=self.hidden_size)
            # One learnable (1, hidden_size) weighting per edge type; also set as
            # attributes so gluon registers them as parameters of this block.
            self.edge_type_weightings = OrderedDict()
            for t in self.data_encoder.all_edge_types:
                edge_type_weighting = self.params.get('edge_type_weighting_{}'.format(t), grad_req='write',
                                                      shape=(1, self.hidden_size))
                self.__setattr__('edge_type_weighting_{}'.format(t), edge_type_weighting)
                self.edge_type_weightings[t] = edge_type_weighting
            # Readout head is only needed when this model is used for the FITB task.
            if FITBModel in self.__class__.mro():
                self.readout_mlp = gluon.nn.HybridSequential()
                with self.readout_mlp.name_scope():
                    self.readout_mlp.add(gluon.nn.Dense(self.hidden_size, activation='tanh', in_units=self.hidden_size))
                    self.readout_mlp.add(gluon.nn.Dense(1, in_units=self.hidden_size))
    def compute_messages(self, F, hidden_states, edges, t):
        # Messages: per edge type, weight the projected hidden states, pass them
        # through tanh, then aggregate from neighbors via the adjacency matrix.
        hidden_states = self.hidden_message_dense(hidden_states)
        summed_msgs = []
        for key in self.edge_type_weightings.keys():
            adj_mat, edge_type_weighting = edges[key], self.edge_type_weightings[key]
            # Compute the messages passed for this edge type
            passed_msgs = F.tanh(
                self.hidden_and_edge_dense(hidden_states * edge_type_weighting.data()))  # n_vertices X hidden_size
            # Sum messages from all neighbors
            summed_msgs.append(F.dot(adj_mat, passed_msgs))
        # Sum the per-edge-type aggregates into a single message tensor.
        summed_msgs = F.sum(F.stack(*summed_msgs), axis=0)
        return summed_msgs
    def update_hidden_states(self, F, hidden_states, messages, t):
        # DTNN update rule: plain additive (residual-style) update.
        return hidden_states + messages
    def readout(self, F, hidden_states):
        # Per-vertex scalar score via the FITB readout MLP.
        return self.readout_mlp(hidden_states)
|
[
"mwcvitkovic@gmail.com"
] |
mwcvitkovic@gmail.com
|
f3e029ef5acbe8e796a4ba75d99292456d5d7dd7
|
8832f83436809e8e918e60e5526d95add9fe8dbd
|
/books_app/migrations/0069_auto_20191002_1610.py
|
825b2b23a78d57c127bd9697fe680eaecabd9d58
|
[] |
no_license
|
HCDigitalScholarship/booksofduchesses
|
e31e56eaba253b92a1362de5918b5b005cb27f3c
|
3f0e27515963c92a56714c5bada3b6a68a8665df
|
refs/heads/master
| 2022-12-09T18:41:20.019687
| 2021-10-25T14:58:18
| 2021-10-25T14:58:18
| 190,254,161
| 0
| 3
| null | 2022-12-08T05:21:54
| 2019-06-04T18:05:08
|
Python
|
UTF-8
|
Python
| false
| false
| 849
|
py
|
# Generated by Django 2.2.2 on 2019-10-02 16:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds Text.estc_link and widens ihrt_link / me_compendium_link.

    Auto-generated by Django 2.2.2; regenerate rather than hand-editing.
    """

    dependencies = [("books_app", "0068_auto_20190930_1758")]
    operations = [
        migrations.AddField(
            model_name="text",
            name="estc_link",
            field=models.CharField(
                blank=True, max_length=800, verbose_name="ESTC Link"
            ),
        ),
        migrations.AlterField(
            model_name="text",
            name="ihrt_link",
            field=models.CharField(blank=True, max_length=800),
        ),
        migrations.AlterField(
            model_name="text",
            name="me_compendium_link",
            field=models.CharField(
                blank=True, max_length=200, verbose_name="ME Compendium Link"
            ),
        ),
    ]
|
[
"apjanco@gmail.com"
] |
apjanco@gmail.com
|
da05f206093955bc97ef19a62bc0a70e69711fc6
|
5e9dacbb7a9613b7c8d8c92398bb66926a314c38
|
/script.py
|
ecff88305875f987118660b170ce2849290d9f87
|
[] |
no_license
|
pol9111/tencent_WechatOffAcc_auto
|
645b95bfd893706df4651f1e8f67ea1dc57a03de
|
3aa2a9a8a78796d5b829f9bf49cc849713ed41b7
|
refs/heads/master
| 2020-03-24T16:24:08.783424
| 2018-07-30T04:07:14
| 2018-07-30T04:07:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,250
|
py
|
import json
import pymongo
import re
from config import *
def response(flow):
    """mitmproxy response hook: scrape WeChat article stats and upsert to MongoDB.

    Two responses are correlated via module-level globals: the stats endpoint
    supplies read/like/comment counts, the article page supplies title and
    publish time; the record is written when the article page is seen.

    NOTE(review): assumes the stats response is observed before the article
    response — otherwise the globals are unset and this raises NameError.
    """
    global like_num, title, pub_time, read_num, comment_num
    # NOTE(review): a new MongoClient is created on every intercepted response;
    # consider hoisting to module level.
    client = pymongo.MongoClient(MONGO_URL)
    db = client[MONGO_DB]
    table = db[MONGO_COLLECTION]
    # Fetch the WeChat stats JSON, which carries the read and like counts.
    url_msg = 'mp.weixin.qq.com/mp/getappmsgext?'
    if url_msg in flow.request.url:
        text_msg = flow.response.text
        data_py = json.loads(text_msg)
        content = data_py.get('appmsgstat')
        like_num = content.get('like_num')
        read_num = content.get('read_num')
        comment_num = data_py.get('comment_count')
    # Fetch the article response and extract the title and publish time.
    url_article = 'mp.weixin.qq.com/s?'
    if url_article in flow.request.url:
        text_arti = flow.response.text
        pub_time = re.findall(r'publish_time.*"(\d+-\d+-\d+)".*', text_arti)[0]
        title = re.findall(r'msg_title\s=\s"(.*?)";', text_arti)[0]
        data = {
            '文章标题': title,
            '发布时间': pub_time,
            '阅读数': read_num,
            '点赞数': like_num,
            '评论数': comment_num,
        }
        print(data)
        # Upsert keyed on article title. NOTE(review): collection.update() is
        # deprecated in modern pymongo — update_one(..., upsert=True) preferred.
        table.update({'文章标题': title}, {'$set': data}, True)
|
[
"biscuit36@163.com"
] |
biscuit36@163.com
|
a2d6c12a2bd7956f2c562f8cfe0e2ac7678d9769
|
3003a8663135aa10f5a152a8642bc6ab270995b9
|
/ggCloudSDK/google-cloud-sdk/lib/googlecloudsdk/sql/lib/instances.py
|
9580cc32edf3f272d2994243b0b16c424ce6e6fb
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
bopopescu/packmybot
|
1b4d199b36d196e5e769a781b520019bb4d0bdbc
|
92de1e72cfc51b41447366ffc81a9dcd9a5e7870
|
refs/heads/master
| 2022-11-25T23:46:06.946645
| 2015-10-22T08:22:04
| 2015-10-22T08:22:04
| 282,313,675
| 0
| 0
| null | 2020-07-24T20:50:10
| 2020-07-24T20:50:10
| null |
UTF-8
|
Python
| false
| false
| 7,040
|
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
"""Common utility functions for sql instances."""
from googlecloudsdk.calliope import exceptions
class _BaseInstances(object):
"""Common utility functions for sql instances."""
@classmethod
def _SetBackupConfiguration(cls, sql_messages, settings, args, original):
"""Sets the backup configuration for the instance."""
# these args are only present for the patch command
no_backup = not getattr(args, 'backup', True)
if original and (
any([args.backup_start_time, args.enable_bin_log is not None,
no_backup])):
if original.settings.backupConfiguration:
backup_config = original.settings.backupConfiguration[0]
else:
backup_config = sql_messages.BackupConfiguration(
startTime='00:00',
enabled=False),
elif not any([args.backup_start_time, args.enable_bin_log is not None,
no_backup]):
return
if not original:
backup_config = sql_messages.BackupConfiguration(
startTime='00:00',
enabled=False)
if args.backup_start_time:
backup_config.startTime = args.backup_start_time
backup_config.enabled = True
if no_backup:
if args.backup_start_time or args.enable_bin_log is not None:
raise exceptions.ToolException(
('Argument --no-backup not allowed with'
' --backup-start-time or --enable-bin-log'))
backup_config.enabled = False
if args.enable_bin_log is not None:
backup_config.binaryLogEnabled = args.enable_bin_log
cls.AddBackupConfigToSettings(settings, backup_config)
@staticmethod
def _SetDatabaseFlags(sql_messages, settings, args):
if args.database_flags:
settings.databaseFlags = []
for (name, value) in args.database_flags.items():
settings.databaseFlags.append(sql_messages.DatabaseFlags(
name=name,
value=value))
elif getattr(args, 'clear_database_flags', False):
settings.databaseFlags = []
@staticmethod
def _ConstructSettingsFromArgs(sql_messages, args):
"""Constructs instance settings from the command line arguments.
Args:
sql_messages: module, The messages module that should be used.
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A settings object representing the instance settings.
Raises:
ToolException: An error other than http error occured while executing the
command.
"""
settings = sql_messages.Settings(
tier=args.tier,
pricingPlan=args.pricing_plan,
replicationType=args.replication,
activationPolicy=args.activation_policy)
# these args are only present for the patch command
clear_authorized_networks = getattr(args, 'clear_authorized_networks',
False)
clear_gae_apps = getattr(args, 'clear_gae_apps', False)
if args.authorized_gae_apps:
settings.authorizedGaeApplications = args.authorized_gae_apps
elif clear_gae_apps:
settings.authorizedGaeApplications = []
if any([args.assign_ip is not None, args.require_ssl is not None,
args.authorized_networks, clear_authorized_networks]):
settings.ipConfiguration = sql_messages.IpConfiguration()
if args.assign_ip is not None:
settings.ipConfiguration.enabled = args.assign_ip
if args.authorized_networks:
settings.ipConfiguration.authorizedNetworks = args.authorized_networks
if clear_authorized_networks:
# For patch requests, this field needs to be labeled explicitly cleared.
settings.ipConfiguration.authorizedNetworks = []
if args.require_ssl is not None:
settings.ipConfiguration.requireSsl = args.require_ssl
if any([args.follow_gae_app, args.gce_zone]):
settings.locationPreference = sql_messages.LocationPreference(
followGaeApplication=args.follow_gae_app,
zone=args.gce_zone)
if getattr(args, 'enable_database_replication', None) is not None:
settings.databaseReplicationEnabled = args.enable_database_replication
return settings
@classmethod
def ConstructInstanceFromArgs(cls, sql_messages, args,
original=None, instance_ref=None):
"""Construct a Cloud SQL instance from command line args.
Args:
sql_messages: module, The messages module that should be used.
args: argparse.Namespace, The CLI arg namespace.
original: sql_messages.DatabaseInstance, The original instance, if some of
it might be used to fill fields in the new one.
instance_ref: reference to DatabaseInstance object, used to fill project
and instance information.
Returns:
sql_messages.DatabaseInstance, The constructed (and possibly partial)
database instance.
Raises:
ToolException: An error other than http error occured while executing the
command.
"""
settings = cls._ConstructSettingsFromArgs(sql_messages, args)
cls._SetBackupConfiguration(sql_messages, settings, args, original)
cls._SetDatabaseFlags(sql_messages, settings, args)
# these flags are only present for the create command
region = getattr(args, 'region', None)
database_version = getattr(args, 'database_version', None)
instance_resource = sql_messages.DatabaseInstance(
region=region,
databaseVersion=database_version,
masterInstanceName=getattr(args, 'master_instance_name', None),
settings=settings)
if hasattr(args, 'master_instance_name'):
if args.master_instance_name:
replication = 'ASYNCHRONOUS'
activation_policy = 'ALWAYS'
else:
replication = 'SYNCHRONOUS'
activation_policy = 'ON_DEMAND'
if not args.replication:
instance_resource.settings.replicationType = replication
if not args.activation_policy:
instance_resource.settings.activationPolicy = activation_policy
if instance_ref:
cls.SetProjectAndInstanceFromRef(instance_resource, instance_ref)
return instance_resource
class InstancesV1Beta3(_BaseInstances):
  """Common utility functions for sql instances V1Beta3."""

  @staticmethod
  def SetProjectAndInstanceFromRef(instance_resource, instance_ref):
    # v1beta3 identifies the instance via the 'instance' field.
    instance_resource.project = instance_ref.project
    instance_resource.instance = instance_ref.instance

  @staticmethod
  def AddBackupConfigToSettings(settings, backup_config):
    # v1beta3 stores backupConfiguration as a single-element list.
    settings.backupConfiguration = [backup_config]
class InstancesV1Beta4(_BaseInstances):
  """Common utility functions for sql instances V1Beta4."""

  @staticmethod
  def SetProjectAndInstanceFromRef(instance_resource, instance_ref):
    # v1beta4 identifies the instance via the 'name' field.
    instance_resource.project = instance_ref.project
    instance_resource.name = instance_ref.instance

  @staticmethod
  def AddBackupConfigToSettings(settings, backup_config):
    # v1beta4 stores backupConfiguration as a single object, not a list.
    settings.backupConfiguration = backup_config
|
[
"cboussicaud@leaseplan.fr"
] |
cboussicaud@leaseplan.fr
|
f9da954cdcb17cee51e9d873568d288fdf2c9cdb
|
f6f29c2fa719c53eee73de2acd86db9e1278182e
|
/code_wars/calculating_with_functions.py
|
a3f2c7e84a244f5b3dd4d6052494c5ab40d538cb
|
[] |
no_license
|
byt3-m3/python_code_practice
|
ca08320e1778449d30204b65f15903d5830b7975
|
40e215c4d4ab62cf7d55d2456d94550335825906
|
refs/heads/master
| 2023-07-24T08:29:06.624850
| 2021-09-04T02:39:32
| 2021-09-04T02:39:32
| 256,984,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,290
|
py
|
'''
This time we want to write calculations using functions and get the results. Let's have a look at some examples:
seven(times(five())) # must return 35
four(plus(nine())) # must return 13
eight(minus(three())) # must return 5
six(divided_by(two())) # must return 3
Requirements:
There must be a function for each number from 0 ("zero") to 9 ("nine")
There must be a function for each of the following mathematical operations: plus, minus, times, dividedBy (divided_by in Ruby and Python)
Each calculation consist of exactly one operation and two numbers
The most outer function represents the left operand, the most inner function represents the right operand
Divison should be integer division. For example, this should return 2, not 2.666666...:
'''
def _process(data, base):
num = data[0]
oper = data[1]
if oper == "*":
return base * num
if oper == "/":
return base // num
if oper == "+":
return base + num
if oper == "-":
return base - num
def _make_digit(value):
    """Build one digit function for the calculating-with-functions kata.

    The returned function, called with no argument, yields its digit value
    (it is the right operand). Called with an (operand, operator) tuple from
    plus/minus/times/divided_by, it acts as the left operand and evaluates
    the deferred operation via _process. Replaces ten copy-pasted bodies.
    """
    def digit(data=None):
        if isinstance(data, tuple):
            return _process(data, value)
        return value
    return digit


zero = _make_digit(0)
one = _make_digit(1)
two = _make_digit(2)
three = _make_digit(3)
four = _make_digit(4)
five = _make_digit(5)
six = _make_digit(6)
seven = _make_digit(7)
eight = _make_digit(8)
nine = _make_digit(9)
def plus(num):
    """Defer addition: tag the right operand with '+' for the outer digit."""
    return num, "+"


def minus(num):
    """Defer subtraction: tag the right operand with '-'."""
    return num, "-"


def times(num):
    """Defer multiplication: tag the right operand with '*'."""
    return num, "*"


def divided_by(num):
    """Defer integer division: tag the right operand with '/'."""
    return num, "/"
# Smoke tests: 1 - 5 == -4 and 5 * 7 == 35.
result_1 = one(minus(five()))
result_2 = five(times(seven()))
print(result_1)
print(result_2)
|
[
"cbaxtertech@gmail.com"
] |
cbaxtertech@gmail.com
|
e3baf698b803e39d4869c69af482d97836496848
|
91d96fc4084a55a74f761ed7bc7d0adba533618a
|
/projects/pset2.0_Forkable_Difficulty_Adjusting/blockchain-visualizer/visualize.py
|
352b2f7230f8cd77c28efa64538cda9744295698
|
[
"MIT"
] |
permissive
|
Averylamp/mas.s62
|
169bb76f1289a3d4569a952075bfb8e7842e1dca
|
382dc036ae014785be4c464ed8c4aef533fd52ab
|
refs/heads/master
| 2020-03-17T16:14:56.613227
| 2018-05-17T03:56:09
| 2018-05-17T03:56:09
| 133,741,785
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,143
|
py
|
import pickle
from datetime import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
import graphviz
def mine_rate_info(endpoint_block, origin_block, block_information, time_interval):
    """Bucket block-mining times along the chain ending at *endpoint_block*.

    Walks previousHash links backwards from endpoint_block until the genesis
    marker (previousHash == ''), counting blocks per *time_interval*-second
    bucket measured from origin_block's timestamp.

    Returns:
        (times_list, mined_buckets): bucket start datetimes and per-bucket counts.

    NOTE(review): the endpoint timestamp is read from the module-level
    `highest_block` global, NOT from the `endpoint_block` parameter. This
    works because the highest tip is never earlier than any other tip (so the
    bucket arrays are always long enough), and the plotting script below
    relies on all calls producing equally long arrays — changing this to use
    the parameter would desync the plotted series. Confirm before refactoring.
    """
    endpoint_dt = datetime.fromtimestamp(highest_block[0]['timestamp'])
    origin_dt = datetime.fromtimestamp(block_information[origin_block]['timestamp'])
    block_hash = endpoint_block
    # +5 slack buckets guard against boundary rounding.
    num_buckets = int((endpoint_dt - origin_dt).total_seconds() / time_interval) + 5
    mined_buckets = [0]*num_buckets
    times_list = [origin_dt + timedelta(seconds=x*time_interval) for x in range(0, num_buckets)]
    assert len(times_list) == len(mined_buckets)
    while block_hash != '':
        block_info = block_information[block_hash]
        timestamp = block_information[block_hash]['timestamp']
        dt = datetime.fromtimestamp(timestamp)
        bucket_ind = int((dt - origin_dt).total_seconds() / time_interval)
        mined_buckets[bucket_ind] += 1
        block_hash = block_info['blockInformation']['previousHash']
    return times_list, mined_buckets
def aggregate_info(mined_buckets):
    """Return the running (cumulative) total of *mined_buckets*.

    Replaces the manual index-based prefix-sum loop with
    itertools.accumulate; behavior is identical, including [] -> [].
    """
    from itertools import accumulate  # local import keeps the module header unchanged
    return list(accumulate(mined_buckets))
def generate_graphviz(block_information):
    """Render the block tree as a Graphviz digraph and open the viewer.

    Writes 'block_information.gv'; every block gets an unlabeled node with an
    edge from its predecessor.
    """
    g = graphviz.Digraph('G', filename='block_information.gv')
    # Synthetic root so genesis blocks (previousHash == '') have a parent node.
    g.node("origin", "")
    for block_hash in block_information:
        g.node(block_hash, "")
        prev_hash = block_information[block_hash]['blockInformation']['previousHash']
        if prev_hash == '':
            prev_hash = "origin"
        g.edge(prev_hash, block_hash)
    g.view()
# Load the chain snapshots dumped by the server.
# NOTE(review): pickle.load on files is unsafe for untrusted input — fine for
# locally produced snapshots, do not point at downloaded data.
block_information = pickle.load(open("../server-python/block_information.pickle", 'rb'))
highest_block = pickle.load(open("../server-python/highest_block.pickle", 'rb'))
print("Creating graphviz...")
# generate_graphviz(block_information)
print("Done.")
# exit()
# block height 0: 6c179f21e6f62b629055d8ab40f454ed02e48b68563913473b857d3638e23b28
origin_block = "6c179f21e6f62b629055d8ab40f454ed02e48b68563913473b857d3638e23b28"
forked_block = "00001d87846888b85e4b9b757b59a936b0ff33d8128518c78efaa092572efbfd" # Put the hash of another tip here to graph it as well
endpoint_block = highest_block[0]['blockHash']
print(endpoint_block)
time_interval = 0.5 # seconds
# Bucket mining events along the main chain and the attacker fork.
times_list, mined_buckets = mine_rate_info(endpoint_block, origin_block, block_information, time_interval)
forked_times_list, forked_mined_buckets = mine_rate_info(forked_block, origin_block, block_information, time_interval)
aggregate_buckets = aggregate_info(mined_buckets)
forked_aggregate_buckets = aggregate_info(forked_mined_buckets)
print("Plotting data...")
# line1, = plt.plot(times_list, mined_buckets, label="blocks mined / {}s".format(time_interval))
line2, = plt.plot(times_list, aggregate_buckets, label="total blocks mined")
# line3, = plt.plot(times_list, forked_mined_buckets, label="attacker blocks mined / {}s".format(time_interval))
# Both series share times_list — lengths match because mine_rate_info sizes
# its buckets from the highest tip for every call.
line4, = plt.plot(times_list, forked_aggregate_buckets, label="attacker total blocks mined")
plt.legend(handles=[line2, line4])
plt.show()
print("Done")
|
[
"averylamp@gmail.com"
] |
averylamp@gmail.com
|
447215391bd91ac4d5a721c47f8d0298d4eb5b3f
|
c001d8cff7e634bfa19d682ccdcf5261bc7bf397
|
/cotizacionweb/migrations/0005_auto_20160420_1104.py
|
f9051f65ba22309b3fc40fa1bad989072d8ebdc8
|
[] |
no_license
|
yusnelvy/mtvmcotizacionv02
|
4053a6883519901e3652a141ef83c297c5aa0ccd
|
f0d94faff9c721f25018b7db12a07786508da565
|
refs/heads/master
| 2021-01-21T12:58:49.014716
| 2016-05-06T20:49:59
| 2016-05-06T20:49:59
| 50,135,715
| 0
| 0
| null | 2016-05-25T12:32:34
| 2016-01-21T20:48:27
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,464
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Renames/removes legacy serviciomueble fields and adds contenedor,
    historico-fecha and cotizacion-servicio columns.

    Auto-generated by Django; regenerate rather than hand-editing.
    """

    dependencies = [
        ('contenedor', '0005_contenedortipicopormueble_tipo_de_contenido'),
        ('cotizacionweb', '0004_auto_20160414_1529'),
    ]
    operations = [
        migrations.RenameField(
            model_name='serviciomueble',
            old_name='porcentaje_complejidad',
            new_name='cantidad',
        ),
        migrations.RenameField(
            model_name='serviciomueble',
            old_name='descripcion_monto_servicio',
            new_name='descripcion_cantidad',
        ),
        migrations.RemoveField(
            model_name='cotizacionhistoricofecha',
            name='fecha_actual',
        ),
        migrations.RemoveField(
            model_name='cotizacionhistoricofecha',
            name='hora_actual',
        ),
        migrations.RemoveField(
            model_name='serviciomueble',
            name='complejidad_servicio',
        ),
        migrations.RemoveField(
            model_name='serviciomueble',
            name='incluido',
        ),
        migrations.RemoveField(
            model_name='serviciomueble',
            name='monto_servicio',
        ),
        migrations.RemoveField(
            model_name='serviciomueble',
            name='monto_servicio_asignado',
        ),
        migrations.AddField(
            model_name='contenedormueble',
            name='tipo_de_contenido',
            field=models.ForeignKey(to='contenedor.TipoDeContenido', default=1),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='cotizacionhistoricofecha',
            name='fecha',
            field=models.DateTimeField(default='2016-04-01 00:00:00'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='cotizacionservicio',
            name='cantidad_servicio',
            field=models.DecimalField(max_digits=7, decimal_places=2, default=1),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='fechadecotizacion',
            name='obligatoria',
            field=models.BooleanField(default=None),
        ),
        migrations.AlterField(
            model_name='cotizacionestado',
            name='fecha_registro',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
|
[
"yusnelvy@gmail.com"
] |
yusnelvy@gmail.com
|
2bc647123df644c429a647698050cb197c682e88
|
5b5a49643c75aa43d5a876608383bc825ae1e147
|
/tests/lists/p121_test.py
|
22041a3cf5ee7085bd6f9c855959da66c5eaec06
|
[] |
no_license
|
rscai/python99
|
281d00473c0dc977f58ba7511c5bcb6f38275771
|
3fa0cb7683ec8223259410fb6ea2967e3d0e6f61
|
refs/heads/master
| 2020-04-12T09:08:49.500799
| 2019-10-06T07:47:17
| 2019-10-06T07:47:17
| 162,393,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
from python99.lists.p121 import insert_at, insert_at_mutable
def test_insert_at():
    """insert_at places the element BEFORE 1-based position k; k past the end appends."""
    assert insert_at([1, 2, 3, 4, 5, 6], 2, 'a') == [1, 'a', 2, 3, 4, 5, 6]
    assert insert_at([1, 2, 3, 4, 5, 6], 1, 'a') == ['a', 1, 2, 3, 4, 5, 6]
    assert insert_at([1, 2, 3, 4, 5, 6], 7, 'a') == [1, 2, 3, 4, 5, 6, 'a']
def test_insert_at_mutable():
    """Exercise the in-place variant.

    BUG FIX: the original was a copy-paste of test_insert_at and called
    insert_at, leaving insert_at_mutable completely untested.
    """
    assert insert_at_mutable([1, 2, 3, 4, 5, 6], 2, 'a') == [1, 'a', 2, 3, 4, 5, 6]
    assert insert_at_mutable([1, 2, 3, 4, 5, 6], 1, 'a') == ['a', 1, 2, 3, 4, 5, 6]
    assert insert_at_mutable([1, 2, 3, 4, 5, 6], 7, 'a') == [1, 2, 3, 4, 5, 6, 'a']
|
[
"ray.s.cai@icloud.com"
] |
ray.s.cai@icloud.com
|
f340f6fe2ce9cef2755406e2d7327934041ad8c1
|
6fe477c7b32f0020a5fffe6affbc7546b16ab879
|
/healthpoints/src/healthpoints/apps/tracks/migrations/0003_auto__add_field_activity_shard_id__add_field_activity_note_id.py
|
aca19183adb724bd430c79164d590c788b213d1b
|
[] |
no_license
|
rootart/healthpoints
|
cb79cc4b8e3ceb9401eb5894518e026673f98545
|
c33f8e2d0d62e66b3e967f3e464097482abebd91
|
refs/heads/master
| 2021-01-01T05:52:06.661165
| 2014-10-12T05:45:11
| 2014-10-12T05:45:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,555
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Activity.shard_id'
db.add_column(u'tracks_activity', 'shard_id',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Activity.note_id'
db.add_column(u'tracks_activity', 'note_id',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Activity.shard_id'
db.delete_column(u'tracks_activity', 'shard_id')
# Deleting field 'Activity.note_id'
db.delete_column(u'tracks_activity', 'note_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'tracks.activity': {
'Meta': {'object_name': 'Activity'},
'average_speed': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'calories': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'distance': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '6', 'blank': 'True'}),
'guID': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'location_country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'moving_time': ('timedelta.fields.TimedeltaField', [], {'null': 'True', 'blank': 'True'}),
'note_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'polyline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'resource_state': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'route': ('django.contrib.gis.db.models.fields.LineStringField', [], {'null': 'True', 'blank': 'True'}),
'shard_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'total_elevation_gain': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['tracks']
|
[
"dijakroot@gmail.com"
] |
dijakroot@gmail.com
|
bf811162014e14e26b71ed53ffec58e618d594a3
|
2157782cf5875767f8d1fe0bb07243da2e87600d
|
/send_email/email_helper.py
|
5012f4ab74d9a69b947ea3e386bf2d903abaa39f
|
[] |
no_license
|
mouday/SomeCodeForPython
|
9bc79e40ed9ed851ac11ff6144ea080020e01fcd
|
ddf6bbd8a5bd78f90437ffa718ab7f17faf3c34b
|
refs/heads/master
| 2021-05-09T22:24:47.394175
| 2018-05-11T15:34:22
| 2018-05-11T15:34:22
| 118,750,143
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,112
|
py
|
#email_helper.py
'''
参考:https://www.liaoxuefeng.com/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000/001432005226355aadb8d4b2f3f42f6b1d6f2c5bd8d5263000
封装成简单邮件发送模块
'''
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib
debug = True  # module-wide switch: when True, debug_info() prints its argument
def debug_info(text):
    """Print *text* when the module-level ``debug`` flag is enabled."""
    if debug:  # idiomatic truthiness test instead of "== True"
        print(text)
class EmailClient(object):
'邮件发送端初始化类'
def __init__(self, smtp_server):
'初始化服务器地址'
self._smtp_server = smtp_server
self.addrs = [] # 邮件地址列表, 格式(addr, name)
def login(self, from_addr, password, from_name="admin"):
'登录'
self._from_addr = from_addr
self._password = password
self._from_name = from_name
try:
self.server = smtplib.SMTP(self._smtp_server, 25)
#server.set_debuglevel(1)
self.server.login(self._from_addr, self._password)
except Exception as e:
return -1 # 登录失败
debug_info("登录失败")
else:
return 0 # 登录成功
debug_info("登录成功")
def send(self, title, text, to_addr, to_name=None):
'发送邮件'
if to_name == None: to_name=to_addr
try:
# 接受方信息
msg = MIMEText(text, 'plain', 'utf-8')
msg['From'] = self._format_addr('%s<%s>' % (self._from_name,self._from_addr))
msg['To'] = self._format_addr('%s <%s>' % (to_name,to_addr))
msg['Subject'] = Header(title, 'utf-8').encode()
# 发送内容
self.server.sendmail(self._from_addr, to_addr, msg.as_string())
return 0
except Exception as e:
debug_info(e)
return -1
def add_address(self, addr, name=None):
'增加地址到地址列表'
if name==None: name = addr
self.addrs.append((addr, name))
def send_all(self, title, text):
'发送所有人'
success = 0
fail = 0
for addr, name in self.addrs:
ret = self.send(title, text, addr, name)
if ret == 0:
success += 1
else:
fail += 1
return success, fail
def __del__(self):
'析构'
self.server.quit()
def _format_addr(self, s):
'格式化地址'
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
if __name__ == '__main__':
    # Demo run: placeholder credentials/addresses must be replaced to work.
    email_client=EmailClient("smtp.163.com") # mail server address
    email_client.login("username", "password", "name") # log in
    email_client.add_address("email") # add recipients
    email_client.add_address("email")
    email_client.add_address("email")
    # send to everyone and report the per-recipient outcome counts
    success, fail = email_client.send_all("邮件标题", "邮件内容,试试看能不能发送出去")
    print("success:", success, "fail:", fail) # print send results
|
[
"1940607002@qq.com"
] |
1940607002@qq.com
|
762824112bf390cf4f8ff8ee2d484e6524fbca21
|
c95f245a5252ec1185e13ef5d37ff599dd451fee
|
/telethon/network/connection/tcpfull.py
|
fd9fd1cf58e9bd9932053d283a5d676b226f6cd5
|
[
"MIT"
] |
permissive
|
perryyo/Telethon
|
6f95ce09ad86a94c44fe697ba6d49df4914cb321
|
0046291254f9c96f8824ff7b42fa695fa3f71fc5
|
refs/heads/master
| 2020-04-07T17:08:15.994174
| 2019-02-11T07:13:44
| 2019-02-11T07:13:44
| 158,558,142
| 0
| 0
|
MIT
| 2018-11-21T14:12:22
| 2018-11-21T14:12:21
| null |
UTF-8
|
Python
| false
| false
| 1,463
|
py
|
import struct
from zlib import crc32
from .connection import Connection
from ...errors import InvalidChecksumError
class ConnectionTcpFull(Connection):
    """
    Default Telegram mode. Every packet carries 12 extra bytes
    (length + sequence number + CRC32) that we compute ourselves.
    """
    def __init__(self, ip, port, *, loop, proxy=None):
        super().__init__(ip, port, loop=loop, proxy=proxy)
        self._send_counter = 0
    async def connect(self, timeout=None, ssl=None):
        await super().connect(timeout=timeout, ssl=ssl)
        # Telegram expects the sequence number to restart on (re)connect.
        self._send_counter = 0
    def _send(self, data):
        # https://core.telegram.org/mtproto#tcp-transport
        # wire layout: <total length><seq-no><payload><crc32>
        header = struct.pack('<ii', len(data) + 12, self._send_counter)
        packet = header + data
        self._send_counter += 1
        self._writer.write(packet + struct.pack('<I', crc32(packet)))
    async def _recv(self):
        head = await self._reader.readexactly(8)  # length (4) + seq (4)
        total_len, _seq = struct.unpack('<ii', head)
        rest = await self._reader.readexactly(total_len - 8)
        payload, tail = rest[:-4], rest[-4:]
        received_crc = struct.unpack('<I', tail)[0]
        expected_crc = crc32(head + payload)
        if received_crc != expected_crc:
            raise InvalidChecksumError(received_crc, expected_crc)
        return payload
|
[
"totufals@hotmail.com"
] |
totufals@hotmail.com
|
5bf1c3fcd512c1e389e2f7280476b3433ecf2119
|
c1b8ff60ed4d8c70e703f71b7c96a649a75c0cec
|
/ostPython1/multuple.py
|
c6e25d4906e5ab8d55c5aa5fce4761928a3d621c
|
[] |
no_license
|
deepbsd/OST_Python
|
836d4fae3d98661a60334f66af5ba3255a0cda5c
|
b32f83aa1b705a5ad384b73c618f04f7d2622753
|
refs/heads/master
| 2023-02-14T17:17:28.186060
| 2023-01-31T02:09:05
| 2023-01-31T02:09:05
| 49,534,454
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
#!/usr/bin/env python3
#
#
# multiple.py
#
# Lesson 7: String Formatting
#
# by David S. Jackson
# 11/30/2014
#
# OST Python1: Beginning Python
# for Pat Barton, Instructor
#
"""
takes as data a tuple of two-element tuples, such as ((1,1), 2,2), (12,13),
(4,4), (99,98)). This and/or similar data should be hard-coded (no need for
user input). Loop over the tuple and print out the results of multiplying the
numbers together, and use string formatting to display nicely.
"""
my_tuple = ( (8, 9), (11, 13), (4, 5), (19, 23), (9, 18))
# Print each pair and its product using the fixed-width template:
# 2-digit operand, centered 'X', 2-digit operand, right-aligned '=', 4-digit product.
for left, right in my_tuple:
    product = left * right
    print("{0:2d}{a:^5}{1:2d}{b:>4}{2:4d}".format(left, right, product, a="X", b="="))
|
[
"deepbsd@yahoo.com"
] |
deepbsd@yahoo.com
|
1347ece238e08d92a8903165e9b040ea820981c3
|
9531e597cd3f865cc6b6f780498a18281c2413f8
|
/user_notifications/views.py
|
82f196088698131ef8e60ab25accfb76388764e8
|
[] |
no_license
|
dpitkevics/DevNet
|
7133b80ce5d56b9c11aa4c500d530faed7cb13f4
|
98ebc3916346e6c2bda79711a3896f7c2a8e2ac8
|
refs/heads/master
| 2020-04-15T12:04:00.245848
| 2015-09-14T17:45:39
| 2015-09-14T17:45:39
| 41,320,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,663
|
py
|
import json
import redis
from notifications import notify
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from notifications.models import Notification
from .serializers import NotificationSerializer
@login_required
def get_notifications(request):
    """Return the current user's five newest notifications as JSON.

    If a 'query' POST field is present, only notifications whose verb or
    description contains the query string are returned.  Notifications with
    an empty verb are always excluded.
    """
    query = request.POST.get('query', None)
    notification_serializer_set = []
    if query is not None:
        # filtered: match the query against verb OR description
        notifications = request.user.notifications.order_by('-timestamp').filter(Q(verb__contains=query) | Q(description__contains=query)).exclude(verb="")[:5]
    else:
        # unfiltered: just the latest five
        notifications = request.user.notifications.order_by('-timestamp').all().exclude(verb="")[:5]
    for notification in notifications:
        notification_serializer = NotificationSerializer(notification)
        notification_serializer_set.append(notification_serializer.data)
    # safe=False because the top-level JSON value is a list, not an object
    return JsonResponse(notification_serializer_set, safe=False)
@login_required
def send_notification(request):
    """Send a notification from the current user.

    If 'recipient_username' is posted, only matching users receive it;
    otherwise it is broadcast to every user.  Verb and description come
    from the POST body and default to empty strings.
    """
    recipient_username = request.POST.get('recipient_username', None)
    if recipient_username:
        recipients = User.objects.filter(username=recipient_username)
    else:
        # no recipient given -> broadcast to all users
        recipients = User.objects.all()
    for recipient in recipients:
        notify.send(
            request.user,
            recipient=recipient,
            verb=request.POST.get('verb', ''),
            description=request.POST.get('description', '')
        )
    return HttpResponse(json.dumps({"success": True}), content_type="application/json")
@login_required
def mark_as_read(request):
    """Mark all of the user's unread notifications as read.

    Also publishes a reset message on the per-session Redis channels so
    any live clients can clear their unread badge immediately.
    """
    request.user.notifications.unread().mark_all_as_read()
    redis_client = redis.StrictRedis(host='localhost', port=6379, db=0)
    for session in request.user.session_set.all():
        # one pub/sub channel per browser session
        redis_client.publish(
            'notifications.%s' % session.session_key,
            json.dumps({"mark_as_read": True, "unread_count": 0})
        )
    return HttpResponse(json.dumps({"success": True}), content_type="application/json")
@receiver(post_save, sender=Notification)
def on_notification_post_save(sender, **kwargs):
    """Push the recipient's fresh unread count over Redis after every save.

    Fires on both create and update of a Notification; each of the
    recipient's sessions gets the new count on its own channel.
    """
    redis_client = redis.StrictRedis(host='localhost', port=6379, db=0)
    notification = kwargs['instance']
    recipient = notification.recipient
    for session in recipient.session_set.all():
        redis_client.publish(
            'notifications.%s' % session.session_key,
            json.dumps(dict(
                count=recipient.notifications.unread().count()
            ))
        )
|
[
"daniels.pitkevics@gmail.com"
] |
daniels.pitkevics@gmail.com
|
4d9685bae094c34f6844353f599ed8a19c912a5c
|
d305e9667f18127e4a1d4d65e5370cf60df30102
|
/tests/st/ops/gpu/test_unpack_op.py
|
9a0d8cfda90f7c500d8e6fae7395c0f17d50f593
|
[
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
imyzx2017/mindspore_pcl
|
d8e5bd1f80458538d07ef0a8fc447b552bd87420
|
f548c9dae106879d1a83377dd06b10d96427fd2d
|
refs/heads/master
| 2023-01-13T22:28:42.064535
| 2020-11-18T11:15:41
| 2020-11-18T11:15:41
| 313,906,414
| 6
| 1
|
Apache-2.0
| 2020-11-18T11:25:08
| 2020-11-18T10:57:26
| null |
UTF-8
|
Python
| false
| false
| 5,063
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations.array_ops as P
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
class UnpackNet(nn.Cell):
    """Minimal cell wrapping the Unpack op (axis=3) over a fixed 5-D parameter.

    The (2,2,2,2,2) input is laid out so unpacking along axis 3 yields an
    all-zeros slice and an ascending 0..15 slice.
    """
    def __init__(self, nptype):
        super(UnpackNet, self).__init__()
        self.unpack = P.Unpack(axis=3)
        self.data_np = np.array([[[[[0, 0],
                                    [0, 1]],
                                   [[0, 0],
                                    [2, 3]]],
                                  [[[0, 0],
                                    [4, 5]],
                                   [[0, 0],
                                    [6, 7]]]],
                                [[[[0, 0],
                                   [8, 9]],
                                  [[0, 0],
                                   [10, 11]]],
                                 [[[0, 0],
                                   [12, 13]],
                                  [[0, 0],
                                   [14, 15]]]]]).astype(nptype)
        # parameter so graph mode can consume it as network state
        self.x1 = Parameter(initializer(Tensor(self.data_np), [2, 2, 2, 2, 2]), name='x1')
    @ms_function
    def construct(self):
        return self.unpack(self.x1)
def unpack(nptype):
    # Graph-mode check: run UnpackNet on GPU and compare each unpacked
    # slice against the expected numpy arrays (zeros, then 0..15).
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
    unpack_ = UnpackNet(nptype)
    output = unpack_()
    expect = (np.reshape(np.array([0] * 16).astype(nptype), (2, 2, 2, 2)),
              np.arange(2 * 2 * 2 * 2).reshape(2, 2, 2, 2).astype(nptype))
    for i, exp in enumerate(expect):
        assert (output[i].asnumpy() == exp).all()
def unpack_pynative(nptype):
    # PyNative-mode variant: build the same 5-D input as UnpackNet holds,
    # invoke the Unpack op eagerly, and compare slices with numpy.
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    x1 = np.array([[[[[0, 0],
                      [0, 1]],
                     [[0, 0],
                      [2, 3]]],
                    [[[0, 0],
                      [4, 5]],
                     [[0, 0],
                      [6, 7]]]],
                  [[[[0, 0],
                     [8, 9]],
                    [[0, 0],
                     [10, 11]]],
                   [[[0, 0],
                     [12, 13]],
                    [[0, 0],
                     [14, 15]]]]]).astype(nptype)
    x1 = Tensor(x1)
    expect = (np.reshape(np.array([0] * 16).astype(nptype), (2, 2, 2, 2)),
              np.arange(2 * 2 * 2 * 2).reshape(2, 2, 2, 2).astype(nptype))
    output = P.Unpack(axis=3)(x1)
    for i, exp in enumerate(expect):
        assert (output[i].asnumpy() == exp).all()
# Graph-mode regression tests: one per supported element type, each simply
# running the shared `unpack` helper with a different NumPy dtype.
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_graph_float32():
    unpack(np.float32)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_graph_float16():
    unpack(np.float16)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_graph_int32():
    unpack(np.int32)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_graph_int16():
    unpack(np.int16)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_graph_uint8():
    unpack(np.uint8)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_graph_bool():
    """Graph-mode Unpack over boolean data."""
    # Bug fix: np.bool was a deprecated alias for the builtin bool and is
    # removed in NumPy >= 1.24; np.bool_ is the actual NumPy scalar type.
    unpack(np.bool_)
# PyNative-mode regression tests: same dtype sweep as the graph-mode set,
# driven through the eager `unpack_pynative` helper.
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_pynative_float32():
    unpack_pynative(np.float32)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_pynative_float16():
    unpack_pynative(np.float16)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_pynative_int32():
    unpack_pynative(np.int32)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_pynative_int16():
    unpack_pynative(np.int16)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_pynative_uint8():
    unpack_pynative(np.uint8)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_unpack_pynative_bool():
    """PyNative-mode Unpack over boolean data."""
    # Bug fix: np.bool was a deprecated alias for the builtin bool and is
    # removed in NumPy >= 1.24; np.bool_ is the actual NumPy scalar type.
    unpack_pynative(np.bool_)
|
[
"513344092@qq.com"
] |
513344092@qq.com
|
123692f1d83c0d850298be8ebd18dc3df003f4e0
|
fb4b70ad38d0fc810cb9ee034c8fb963c079f64b
|
/easy/Self_Dividing_Numbers.py
|
33c51ce1cd72ee84467d7802a1ee8de8713c2bb0
|
[] |
no_license
|
ChrisLiu95/Leetcode
|
0e14f0a7b7aa557bb2576589da8e73dbeeae8483
|
baa3342ebe2600f365b9348455f6342e19866a44
|
refs/heads/master
| 2021-07-11T12:01:00.249208
| 2018-09-26T21:27:42
| 2018-09-26T21:27:42
| 117,451,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
"""
A self-dividing number is a number that is divisible by every digit it contains.
For example, 128 is a self-dividing number because 128 % 1 == 0, 128 % 2 == 0, and 128 % 8 == 0.
Also, a self-dividing number is not allowed to contain the digit zero.
Given a lower and upper number bound, output a list of every possible self dividing number, including the bounds if possible.
Example 1:
Input:
left = 1, right = 22
Output: [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15, 22]
Note:
The boundaries of each input argument are 1 <= left <= right <= 10000.
"""
class Solution(object):
    def selfDividingNumbers(self, left, right):
        """Return every self-dividing number in [left, right], ascending.

        A number is self-dividing when it contains no zero digit and is
        divisible by each of its digits (e.g. 128: 128 % 1 == 128 % 2 ==
        128 % 8 == 0).
        """
        res = []
        for num in range(left, right + 1):
            flag = True
            temp = num
            while temp != 0:
                digit = temp % 10
                if digit == 0 or num % digit != 0:
                    flag = False
                    break
                # Bug fix: the original used "temp = temp / 10", which is
                # float division in Python 3 — temp never cleanly reaches 0
                # and the digit extraction breaks. Use floor division.
                temp //= 10
            if flag:
                res.append(num)
        return res
|
[
"xiangchong95@gmail.com"
] |
xiangchong95@gmail.com
|
c5726f75c757c38f8cbd21289f63e73ea05370c2
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/140_gui/pyqt_pyside/examples/PyQt_PySide_book/004_Main components/001_Inscription/171_setScaledContents - toClass.py
|
338077f7a2d487be688c7007a97764ffd712864a
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 522
|
py
|
from PySide import QtCore, QtGui
import sys
class SampleWindow(QtGui.QWidget):
    """Window holding a QLabel that demonstrates setScaledContents()."""
    def __init__(self):
        super(SampleWindow, self).__init__()
        # Bug fix: the original referenced an undefined name "window"
        # (left over from a script version); inside __init__ the widget
        # being configured is "self".
        self.setWindowTitle("Класс QLabel")
        self.resize(300, 150)
        label = QtGui.QLabel()
        label.setText("Текст надписи")
        label.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Plain)
        label.setPixmap(QtGui.QPixmap("foto.png"))
        label.setAutoFillBackground(True)
        label.setScaledContents(True)  # pixmap stretches with the label
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(label)
        self.setLayout(vbox)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
c54e99a0862974e1abc8b7eaf5a168c002dff248
|
a5a4cee972e487512275c34f308251e6cc38c2fa
|
/dev/potential/EamPotential/dev_EamPotential.py
|
1587069d5e39deda89368cb54c938837b9a44bfc
|
[
"MIT"
] |
permissive
|
eragasa/pypospack
|
4f54983b33dcd2dce5b602bc243ea8ef22fee86b
|
21cdecaf3b05c87acc532d992be2c04d85bfbc22
|
refs/heads/master
| 2021-06-16T09:24:11.633693
| 2019-12-06T16:54:02
| 2019-12-06T16:54:02
| 99,282,824
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
import pypospack.potential as potential
# Demo: inspect EamPotential attributes for a single-element system.
symbols = ['Ni']
pot = potential.EamPotential(symbols=symbols)
print('pot.potential_type == {}'.format(\
    pot.potential_type))
print('pot.symbols == {}'.format(\
    pot.symbols))
print('pot.param_names == {}'.format(\
    pot.param_names))
print('pot.is_charge == {}'.format(\
    pot.is_charge))
print('pot.param == {}'.format(\
    pot.param))
print(80*'-')
# Same inspection for a binary (Ni-Al) system.
symbols = ['Ni','Al']
pot = potential.EamPotential(symbols=symbols)
print('pot.potential_type == {}'.format(\
    pot.potential_type))
print('pot.symbols == {}'.format(\
    pot.symbols))
print('pot.param_names == {}'.format(\
    pot.param_names))
print('pot.is_charge == {}'.format(\
    pot.is_charge))
print('pot.param == {}'.format(\
    pot.param))
|
[
"eragasa@ufl.edu"
] |
eragasa@ufl.edu
|
7a529d56ccc005bfccfb9d8c19c6f483390fffa9
|
46bef3a57cb663991387e02f3cc6c0282bd17496
|
/ie/si23tinyyolov2/tflite/Tensor.py
|
0cc948eaa14ee73dcd9f9a7202d91d57e2d163ea
|
[] |
no_license
|
k5iogura/vinosyp
|
36964f4c51b9d695d46e19d64a49156eaaac0042
|
1ef35532c6ba392761f73504ed787c074781c400
|
refs/heads/master
| 2020-04-08T04:27:57.905968
| 2019-09-30T07:34:18
| 2019-09-30T07:34:18
| 159,017,659
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,271
|
py
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
class Tensor(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsTensor(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Tensor()
x.Init(buf, n + offset)
return x
@classmethod
def TensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# Tensor
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Tensor
def Shape(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# Tensor
def ShapeAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# Tensor
def ShapeLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Tensor
def Type(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# Tensor
def Buffer(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Tensor
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Tensor
def Quantization(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .QuantizationParameters import QuantizationParameters
obj = QuantizationParameters()
obj.Init(self._tab.Bytes, x)
return obj
return None
def TensorStart(builder): builder.StartObject(5)
def TensorAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
def TensorStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def TensorAddType(builder, type): builder.PrependInt8Slot(1, type, 0)
def TensorAddBuffer(builder, buffer): builder.PrependUint32Slot(2, buffer, 0)
def TensorAddName(builder, name): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def TensorAddQuantization(builder, quantization): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(quantization), 0)
def TensorEnd(builder): return builder.EndObject()
|
[
"k5i.ogura.v40@gmail.com"
] |
k5i.ogura.v40@gmail.com
|
2e6ecb54b480a398f319df68538b50b978a06dc3
|
f34d3948b707e461151ee33296a61fb23a6d3f44
|
/month01/day11/day11/day10_exercise/exercise01.py
|
2661ccd6399fb82f85eed30d55de03d907cdb447
|
[] |
no_license
|
xiao-a-jian/python-study
|
f9c4e3ee7a2f9ae83bec6afa7c7b5434e8243ed8
|
c8e8071277bcea8463bf6f2e8cd9e30ae0f1ddf3
|
refs/heads/master
| 2022-06-09T17:44:41.804228
| 2020-05-05T07:48:07
| 2020-05-05T07:48:07
| 256,927,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
# 定义函数, 删除列表中相同元素(只保留一个)
# list01 = [6, 54, 65, 677, 6, 65, 6, 65]
# 更节省内存
# def delete_duplicates(list_target):
# for r in range(len(list_target) - 1, 0, -1):
# for c in range(r):
# if list_target[r] == list_target[c]:
# del list_target[r]
# break
#
# # 测试
# list01 = [6, 54, 65, 677, 6, 65, 6, 65]
# delete_all(list01)
# print(list01)
# 更简单
def delete_duplicates(list_target):
    """Remove duplicate elements, keeping one occurrence of each.

    Fix: the previous version returned a ``set``, which discards the
    original ordering and changes the result type; the commented-out
    reference implementation above preserves order in a list.
    ``dict.fromkeys`` keeps first-seen order and is O(n).
    """
    return list(dict.fromkeys(list_target))
# test: deduplicate the sample list and show the result
list01 = [6, 54, 65, 677, 6, 65, 6, 65]
list01 = delete_duplicates(list01)
print(list01)
|
[
"1261247299@qq.com"
] |
1261247299@qq.com
|
4cd9fac0659f565ca93a4ac5eb56440c5998707d
|
b77565a023a88480bb3330b18be929a19775f5dc
|
/정승호/키로거/solution.py
|
570bd7078f7eb72449816e49fd2e0b55166a2674
|
[] |
no_license
|
Jeoungseungho/python-coding-study
|
5af34bff429e24a93f6af4b0473d793ea2b791ee
|
431e02d12d0834c71f423471701a2182f66a3776
|
refs/heads/master
| 2023-08-11T07:38:09.122123
| 2021-10-06T06:32:44
| 2021-10-06T06:32:44
| 283,200,892
| 20
| 12
| null | 2021-10-06T05:22:50
| 2020-07-28T12:07:21
|
Python
|
UTF-8
|
Python
| false
| false
| 591
|
py
|
import sys
input = sys.stdin.readline
# Keylogger reconstruction: replay each keystroke log into the final string.
# Two stacks simulate a text cursor — left_stack holds characters before the
# cursor, right_stack (reversed) holds characters after it.
N = int(input())
for _ in range(N):
    pass_word = input().rstrip()
    left_stack = []
    right_stack = []
    for word in pass_word:
        if word == '<':
            # cursor left: move a char across the cursor (if any)
            if left_stack:
                right_stack.append(left_stack.pop())
        elif word == '>':
            # cursor right
            if right_stack:
                left_stack.append(right_stack.pop())
        elif word == '-':
            # backspace: delete the char just before the cursor
            if left_stack:
                left_stack.pop()
        else: left_stack.append(word)
    # right_stack is stored cursor-outward, so reverse it when joining
    left_stack.extend(reversed(right_stack))
    print(''.join(left_stack))
|
[
"platoon07@khu.ac.kr"
] |
platoon07@khu.ac.kr
|
d29da2fa6b389a1e61c922b0468ca492e288956d
|
3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be
|
/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/securitycenter/v1p1beta1/resources.py
|
204b6a0c852d1fd7a975618ac6a38fa929b91cb3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
twistedpair/google-cloud-sdk
|
37f04872cf1ab9c9ce5ec692d2201a93679827e3
|
1f9b424c40a87b46656fc9f5e2e9c81895c7e614
|
refs/heads/master
| 2023-08-18T18:42:59.622485
| 2023-08-15T00:00:00
| 2023-08-15T12:14:05
| 116,506,777
| 58
| 24
| null | 2022-02-14T22:01:53
| 2018-01-06T18:40:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://securitycenter.googleapis.com/v1p1beta1/'
DOCS_URL = 'https://console.cloud.google.com/apis/api/securitycenter.googleapis.com/overview'
class Collections(enum.Enum):
"""Collections for all supported apis."""
ORGANIZATIONS = (
'organizations',
'organizations/{organizationsId}',
{},
['organizationsId'],
True
)
ORGANIZATIONS_ASSETS = (
'organizations.assets',
'organizations/{organizationsId}/assets/{assetsId}',
{},
['organizationsId', 'assetsId'],
True
)
ORGANIZATIONS_NOTIFICATIONCONFIGS = (
'organizations.notificationConfigs',
'{+name}',
{
'':
'organizations/{organizationsId}/notificationConfigs/'
'{notificationConfigsId}',
},
['name'],
True
)
ORGANIZATIONS_OPERATIONS = (
'organizations.operations',
'{+name}',
{
'':
'organizations/{organizationsId}/operations/{operationsId}',
},
['name'],
True
)
ORGANIZATIONS_SOURCES = (
'organizations.sources',
'{+name}',
{
'':
'organizations/{organizationsId}/sources/{sourcesId}',
},
['name'],
True
)
ORGANIZATIONS_SOURCES_FINDINGS = (
'organizations.sources.findings',
'organizations/{organizationsId}/sources/{sourcesId}/findings/'
'{findingId}',
{},
['organizationsId', 'sourcesId', 'findingId'],
True
)
def __init__(self, collection_name, path, flat_paths, params,
enable_uri_parsing):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
self.enable_uri_parsing = enable_uri_parsing
|
[
"gcloud@google.com"
] |
gcloud@google.com
|
3db27f60c4eb7ce5a20739d242ecf35db354cf90
|
c329057d1561b8ffde0cf26677bb932b4c044826
|
/py32.py
|
25f958ea6850f7c2c14aa2456d3b6012da3874a1
|
[] |
no_license
|
kimotot/pe
|
b3611662110ca8a07b410a8e3d90c412c9decbd3
|
8d12cc64b0f9ad5156e2b1aed0245726acb9a404
|
refs/heads/master
| 2021-01-19T11:53:17.683814
| 2017-04-03T05:05:09
| 2017-04-03T05:05:09
| 82,271,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,341
|
py
|
# coding:UTF-8
import copy
import time
def permutations(origin):
    """Return all permutations of *origin*'s elements as a list of lists.

    Performance fix: the original recursive version deep-copied the input
    at every level; itertools.permutations does the same enumeration in C.
    The result order is unchanged — both pick the element at index 0
    first, then recurse on the remainder.

    >>> permutations([1, 2])
    [[1, 2], [2, 1]]
    """
    from itertools import permutations as _stdlib_perms
    return [list(p) for p in _stdlib_perms(origin)]
def permutationsIt(origin):
    """Lazily yield each permutation of *origin* as a list.

    Generator counterpart of permutations(); same ordering.  The original
    deep-copied the input on every recursive call — delegating to
    itertools.permutations avoids all that copying.
    """
    from itertools import permutations as _stdlib_perms
    for p in _stdlib_perms(origin):
        yield list(p)
def pandegi14(alist):
    """Test a 9-digit sequence for the 1x4 pandigital product layout.

    Interprets alist as d * dddd == dddd and returns (True, product) on a
    match, otherwise (False, 0).
    """
    multiplicand = alist[0]
    multiplier = 0
    for d in alist[1:5]:
        multiplier = multiplier * 10 + d
    product = 0
    for d in alist[5:]:
        product = product * 10 + d
    if multiplicand * multiplier == product:
        return True, product
    return False, 0
def pandegi23(alist):
    """Test whether a 9-digit sequence is a 2x3 pandigital product.

    Interprets alist as: multiplicand = digits 0..1, multiplier = digits 2..4,
    product = digits 5..8.  Returns (True, product) on a match, else (False, 0).
    """
    # Horner's rule: identical to the explicit weighted-sum arithmetic.
    multiplicand = 0
    for digit in alist[0:2]:
        multiplicand = multiplicand * 10 + digit
    multiplier = 0
    for digit in alist[2:5]:
        multiplier = multiplier * 10 + digit
    product = 0
    for digit in alist[5:9]:
        product = product * 10 + digit
    if multiplicand * multiplier == product:
        return True, product
    return False, 0
if __name__ == "__main__":
    # Project Euler 32: collect every product that appears in a 1x4 or 2x3
    # pandigital identity over the digits 1..9, then print the sum of the
    # distinct products and the elapsed wall-clock time.
    start = time.time()
    s = set()  # distinct products (a set, so duplicates count once)
    for n in permutationsIt([1,2,3,4,5,6,7,8,9]):
        b,z = pandegi14(n)
        if b:
            print(14,n)
            s.add(z)
        b,z = pandegi23(n)
        if b:
            print(23,n)
            s.add(z)
    print("総和={0}".format(sum(s)))
    elapsed_time = time.time() - start
    print("処理時間={0:.4f}".format(elapsed_time))
|
[
"god4bid@hear.to"
] |
god4bid@hear.to
|
89855498cc5ffedc6599b095d035b074719742e2
|
0bed1250a4805866f871d037c1bce3e5c8757101
|
/MATH PROB/factorsum.py
|
49ad0da6391d712e6d693b28e7c0123975692580
|
[] |
no_license
|
Srinjana/CC_practice
|
13018f5fd09f8a058e7b634a8626668a0058929a
|
01793556c1c73e6c4196a0444e8840b5a0e2ab24
|
refs/heads/main
| 2023-08-02T05:42:49.016104
| 2021-09-20T15:39:24
| 2021-09-20T15:39:24
| 358,312,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
# for a given number from a list of numbers find the factors and add the factors . If the sum of factors is present in the original list, sort the factors in acsending order
# and print them. If sum not in the original list, print -1.
# Author @Srinjana
def findFactorSum(n):
    """Return the sum of all divisors of n, including 1 and n itself.

    Bug fix: the original tested ``i % n == 0``, which within
    ``range(2, n + 1)`` is only true for i == n, so every input appeared to
    have a factor sum of n + 1.  The correct divisibility test is
    ``n % i == 0`` (per the header comment: "find the factors and add the
    factors").
    """
    factor = [1]  # 1 divides everything; n itself is included by the loop
    for i in range(2, n + 1):
        if n % i == 0:
            factor.append(i)
    return sum(factor)
# Read a comma-separated list of integers from stdin, print every number
# whose factor sum is itself present in the input list, or -1 if none is.
inplist = list(map(int, input().strip().split(",")))
flag = 0  # set to 1 once at least one qualifying number is printed
for i in inplist:
    if findFactorSum(i) in inplist:
        flag = 1
        print(i)
if flag==0:
    print(-1)
|
[
"srinjanap.official@gmail.com"
] |
srinjanap.official@gmail.com
|
4c2e0128f87a1e1cd437f60867570b90acb4259e
|
714a22e87e5ae6a2b670a10437409100015f171b
|
/meshzoo/__init__.py
|
2e6201faacd3e0de9e0015493737a24f245fd3a2
|
[
"MIT"
] |
permissive
|
krober10nd/meshzoo
|
ce3aa71a8a87a0749df78c6939e7d893a05f91d1
|
5e8b04d81ee5c23887e3d0244273b3d90b2eba9a
|
refs/heads/master
| 2021-02-17T00:04:36.319498
| 2020-02-24T15:52:48
| 2020-02-24T15:52:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
from meshzoo.__about__ import __author__, __author_email__, __version__, __website__
from .cube import cube
from .helpers import create_edges, plot2d, show2d
from .hexagon import hexagon
from .moebius import moebius
from .rectangle import rectangle
from .simple_arrow import simple_arrow
from .simple_shell import simple_shell
from .sphere import icosa_sphere, octa_sphere, tetra_sphere, uv_sphere
from .triangle import triangle
from .tube import tube
# Public API of the meshzoo package (what `from meshzoo import *` exposes).
__all__ = [
    "__version__",
    "__author__",
    "__author_email__",
    "__website__",
    # mesh generators
    "cube",
    "hexagon",
    "moebius",
    "rectangle",
    "simple_arrow",
    "simple_shell",
    "uv_sphere",
    "icosa_sphere",
    "octa_sphere",
    "tetra_sphere",
    "triangle",
    "tube",
    # helper utilities
    "show2d",
    "plot2d",
    "create_edges",
]
|
[
"nico.schloemer@gmail.com"
] |
nico.schloemer@gmail.com
|
a292d226c79e5613f782f0ea465e9a03c06b0e6d
|
de725b742e69f38318c04cd44ac970e7135857a5
|
/assets/forms.py
|
0173d7e2fd8182e88243ee75191332c9c8f1868c
|
[] |
no_license
|
haochenxiao666/itelftool
|
e5c0811b48e01d0eeff13d15d33b89960091960a
|
8558dce6d97e7443c95513aa1389910c3902043f
|
refs/heads/master
| 2020-04-14T22:55:46.732111
| 2018-10-18T09:00:44
| 2018-10-18T09:00:44
| 164,183,750
| 1
| 0
| null | 2019-01-05T05:05:32
| 2019-01-05T05:05:31
| null |
UTF-8
|
Python
| false
| false
| 4,458
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.forms.widgets import *
from .models import Asset, IDC, HostGroup, Cabinet
'''
class AssetForm(forms.ModelForm):
class Meta:
model = Asset
exclude = ("id",)
widgets = {
'hostname': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;', 'placeholder': u'必填项'}),
'ip': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;', 'placeholder': u'必填项'}),
'other_ip': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'group': Select(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'asset_no': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'asset_type': Select(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'status': Select(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'os': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'vendor': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'up_time': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'cpu_model': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'cpu_num': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'memory': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'disk': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'sn': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'idc': Select(attrs={'class': 'form-control', 'style': 'width:530px;'}),
'position': TextInput(attrs={'class': 'form-control', 'style': 'width:530px;', 'placeholder': u'物理机写位置,虚机写宿主'}),
'memo': Textarea(attrs={'rows': 4, 'cols': 15, 'class': 'form-control', 'style': 'width:530px;'}),
}
'''
class IdcForm(forms.ModelForm):
    """ModelForm for IDC (data-center) records.

    Every field is rendered as a 450px-wide Bootstrap ``form-control`` input.
    The commented-out ``clean()`` below was a name-uniqueness check; it is
    kept for reference but is not active.
    """
    # def clean(self):
    #     cleaned_data = super(IdcForm, self).clean()
    #     value = cleaned_data.get('ids')
    #     try:
    #         Idc.objects.get(name=value)
    #         self._errors['ids'] = self.error_class(["%s的信息已经存在" % value])
    #     except Idc.DoesNotExist:
    #         pass
    #     return cleaned_data
    class Meta:
        model = IDC
        exclude = ("id",)
        widgets = {
            'ids': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
            'name': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
            'address': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
            'tel': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
            'contact': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
            'contact_phone': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
            'ip_range': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
            'jigui': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
            'bandwidth': TextInput(attrs={'class': 'form-control','style': 'width:450px;'}),
        }
class GroupForm(forms.ModelForm):
    """ModelForm for HostGroup records with a name-uniqueness check.

    Bug fix: the original ``clean()`` queried the ``Cabinet`` table even
    though this form edits ``HostGroup`` records (apparently copy/pasted
    from CabinetForm), so duplicate host-group names were never rejected.
    Validate against ``HostGroup`` instead.
    """
    def clean(self):
        cleaned_data = super(GroupForm, self).clean()
        value = cleaned_data.get('name')
        try:
            # Reject the submission when a HostGroup with this name exists.
            HostGroup.objects.get(name=value)
            self._errors['name'] = self.error_class(["%s的信息已经存在" % value])
        except HostGroup.DoesNotExist:
            pass
        return cleaned_data

    class Meta:
        model = HostGroup
        exclude = ("id", )
        widgets = {
            'name': TextInput(attrs={'class': 'form-control', 'style': 'width:450px;'}),
            'desc': Textarea(attrs={'rows': 4, 'cols': 15, 'class': 'form-control', 'style': 'width:450px;'}),
        }
class CabinetForm(forms.ModelForm):
    """ModelForm for Cabinet records (name, owning IDC, description)."""
    class Meta:
        model = Cabinet
        exclude = ("id", )
        widgets = {
            'name': TextInput(attrs={'class': 'form-control', 'style': 'width:450px;'}),
            'idc': Select(attrs={'class': 'form-control', 'style': 'width:450px;'}),
            'desc': Textarea(attrs={'rows': 4, 'cols': 15, 'class': 'form-control', 'style': 'width:450px;'}),
        }
|
[
"420521738@qq.com"
] |
420521738@qq.com
|
22851ce7e83e2aef32c5620caf346fae7a63488a
|
e2f507e0b434120e7f5d4f717540e5df2b1816da
|
/097-yield-2.py
|
e7bd0b03ff61e85bbac2470ad044513187273938
|
[] |
no_license
|
ash/amazing_python3
|
70984bd32ae325380382b1fe692c4b359ef23395
|
64c98940f8a8da18a8bf56f65cc8c8e09bd00e0c
|
refs/heads/master
| 2021-06-23T14:59:37.005280
| 2021-01-21T06:56:33
| 2021-01-21T06:56:33
| 182,626,874
| 76
| 25
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
# Using yield
def f():
    """Yield the integers 0..99 one at a time.

    Each call creates a fresh, independent generator object; nothing runs
    until next() is called on it.
    """
    for value in range(100):
        yield value  # suspend here; next() resumes after this line
# Each call to f() creates an independent generator with its own state,
# so g1 and g2 advance separately when next() is applied to them.
g1 = f() # generator object
g2 = f() # another generator
print(next(g1)) # 0 — first value from generator 1
print(next(g2)) # 0 — generator 2 starts from the beginning too
print(next(g1)) # 1 — generator 1 resumes where *it* left off
print(next(g2)) # 1
print(next(g1)) # 2
print(next(g2)) # 2
|
[
"andy@shitov.ru"
] |
andy@shitov.ru
|
b4f738393d2222e9668e9e7f689cb0733806ef01
|
87dc1f3fc40565138c1e7dc67f1ca7cb84b63464
|
/03_Hard/10_Knapsack_Problem/Knapsack_Problem.py
|
4151b0f9027ccdcdfa7f6ccba3270994d39e40ac
|
[] |
no_license
|
CodeInDna/Algo_with_Python
|
8424f79fd3051dbc5861ba171ac2b33c76eec8b9
|
a238e9e51effe76c530a4e0da7df871e45ec268a
|
refs/heads/master
| 2021-07-25T08:33:42.475255
| 2021-07-20T16:53:48
| 2021-07-20T16:53:48
| 229,921,183
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,045
|
py
|
# ---------------------------------- PROBLEM 10 (HARD)--------------------------------------#
# Knapsack Problem
# You are given an array of arrays. Each subarray in this array holds two integer values and
# represents an item; the first integer is the item's value, and the second integer is the item's
# weight. You are also given an integer representing the maximum capacity of a knapsack that you have.
# Your goal is to fit items in your knapsack, all the while maximizing their combined value. Note that
# the sum of the weights of the items that you pick cannot exceed the knapsack's capacity. Write a
# function that returns the maximized combined value of the items that you should pick, as well as an
# array of the indices of each item picked. Assume that there will only be one combination of items
# that maximizes the total value in the knapsack.
# Sample input: [[1, 2], [4, 3], [5, 6], [6, 7]], 10
# Sample output: [10, [1, 3]]
# ----------------METHOD 01---------------------#
# COMPLEXITY = TIME: O(Nc), SPACE: O(Nc), where N i the number of items and c is the capacity
def knapsackProblem(lst_Items, target_cap):
    """Solve the 0/1 knapsack problem by dynamic programming.

    lst_Items is a list of [value, weight] pairs; target_cap is the maximum
    total weight.  Returns [best_value, indices_of_picked_items].
    Time/space: O(n * c) for n items and capacity c.
    """
    # table[r][c] = best value using only the first r items with capacity c.
    table = [[0] * (target_cap + 1) for _ in range(len(lst_Items) + 1)]
    for row, (value, weight) in enumerate(lst_Items, start=1):
        for cap in range(target_cap + 1):
            without_item = table[row - 1][cap]
            if weight > cap:
                table[row][cap] = without_item
            else:
                with_item = table[row - 1][cap - weight] + value
                table[row][cap] = max(without_item, with_item)
    return [table[-1][-1], getKnapsackItems(table, lst_Items)]
def getKnapsackItems(knapsackValues, items):
    """Backtrack through a filled knapsack DP table and recover the indices
    of the picked items, in ascending order."""
    picked = []
    row = len(knapsackValues) - 1
    cap = len(knapsackValues[0]) - 1
    while row > 0:
        if knapsackValues[row][cap] == knapsackValues[row - 1][cap]:
            # Item row-1 was not taken: same value without it.
            row -= 1
            continue
        # Item row-1 was taken: record it and give back its weight.
        picked.append(row - 1)
        cap -= items[row - 1][1]
        row -= 1
        if cap == 0:
            break
    picked.reverse()
    return picked
# ----------------METHOD 01---------------------#
|
[
"ykarelia323@gmail.com"
] |
ykarelia323@gmail.com
|
48329fba254e4b07d3988292bb905c7739573dfe
|
56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e
|
/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544841/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_217/run_cfg.py
|
589bc3616bc9bbb96cd7a0726131bdbacc21691c
|
[] |
no_license
|
rmanzoni/HTT
|
18e6b583f04c0a6ca10142d9da3dd4c850cddabc
|
a03b227073b2d4d8a2abe95367c014694588bf98
|
refs/heads/master
| 2016-09-06T05:55:52.602604
| 2014-02-20T16:35:34
| 2014-02-20T16:35:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,500
|
py
|
import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544841/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
# Override the input source inherited from base_cfg with this job's slice of
# CMG tuples (files 1973-1977 of the DY1JetsToLL Summer12 sample).
process.source = cms.Source("PoolSource",
    noEventSort = cms.untracked.bool(True),
    inputCommands = cms.untracked.vstring('keep *',
        'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1973.root',
        '/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1974.root',
        '/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1975.root',
        '/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1976.root',
        '/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1977.root')
    )
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
9ba139bcaa98b5c19be7ed4307c47d44abe13cff
|
2db1a0038d26ccb6adc572b536cb5cd401fd7498
|
/tryTen/Lib/site-packages/setuptools/py31compat.py
|
0f1753a87be81de04522e4b1d674aee34dfb2e8c
|
[] |
no_license
|
syurk/labpin
|
e795c557e7d7bcd4ff449cb9a3de32959a8c4968
|
04070dd5ce6c0a32c9ed03765f4f2e39039db411
|
refs/heads/master
| 2022-12-12T02:23:54.975797
| 2018-11-29T16:03:26
| 2018-11-29T16:03:26
| 159,692,630
| 0
| 1
| null | 2022-11-19T12:15:55
| 2018-11-29T16:04:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,646
|
py
|
import sys
import unittest
__all__ = ['get_config_vars', 'get_path']
try:
    # Python 2.7 or >=3.2: sysconfig provides both helpers natively.
    from sysconfig import get_config_vars, get_path
except ImportError:
    # Older interpreters: emulate sysconfig.get_path() on top of distutils.
    from distutils.sysconfig import get_config_vars, get_python_lib
    def get_path(name):
        # Only the two site-packages-style locations are supported here.
        if name not in ('platlib', 'purelib'):
            raise ValueError("Name must be purelib or platlib")
        return get_python_lib(name == 'platlib')
try:
    # Python >=3.2 ships TemporaryDirectory in the stdlib.
    from tempfile import TemporaryDirectory
except ImportError:
    import shutil
    import tempfile
    class TemporaryDirectory(object):
        """
        Very simple temporary directory context manager.
        Will try to delete afterward, but will also ignore OS and similar
        errors on deletion.
        """
        def __init__(self):
            self.name = None # Handle mkdtemp raising an exception
            self.name = tempfile.mkdtemp()
        def __enter__(self):
            # The context value is the directory path, matching the stdlib.
            return self.name
        def __exit__(self, exctype, excvalue, exctrace):
            try:
                shutil.rmtree(self.name, True)
            except OSError: # removal errors are not the only possible
                pass
            self.name = None
# Default: the stdlib entry point.  Replaced below only on Python 3.1.
unittest_main = unittest.main
_PY31 = (3, 1) <= sys.version_info[:2] < (3, 2)
if _PY31:
    # on Python 3.1, translate testRunner==None to TextTestRunner
    # for compatibility with Python 2.6, 2.7, and 3.2+
    def unittest_main(*args, **kwargs):
        if 'testRunner' in kwargs and kwargs['testRunner'] is None:
            kwargs['testRunner'] = unittest.TextTestRunner
        return unittest.main(*args, **kwargs)
|
[
"syurk738@students.bju.edu"
] |
syurk738@students.bju.edu
|
2e445e4f56c622f6f5d41a6de407c6c9d92f5b20
|
83b8b30ebb633eecd29ca0a7a20cc43a293c9333
|
/tests/basics/subclass_native2_list.py
|
9ad0b77ef6dd1c7659097492eec0ebb77099b017
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
adafruit/circuitpython
|
430ec895149d1eb814b505db39b4977a35ee88a7
|
506dca71b0cbb7af749bb51f86b01021db5483b3
|
refs/heads/main
| 2023-08-21T16:30:46.781068
| 2023-08-20T00:39:44
| 2023-08-20T00:39:44
| 66,166,069
| 3,806
| 1,560
|
MIT
| 2023-09-14T19:23:51
| 2016-08-20T20:10:40
|
C
|
UTF-8
|
Python
| false
| false
| 587
|
py
|
# Test fixture: subclassing a native type (list) together with a plain
# Python base class.  The printed output is the test's expected result, so
# every print statement here is behavior, not debugging.
class Base1:
    def __init__(self, *args):
        print("Base1.__init__", args)
class Clist1(Base1, list):
    pass
# MRO resolves __init__ to Base1, so the list part stays empty (len == 0).
a = Clist1()
print(len(a))
# Not compliant - list assignment should happen in list.__init__, which is not called
# because there's Base1.__init__, but we assign in list.__new__
#a = Clist1([1, 2, 3])
#print(len(a))
print("---")
class Clist2(list, Base1):
    pass
# Not compliant - should call list.__init__, but we don't have it
#a = Clist2()
#print(len(a))
# Not compliant - should call list.__init__, but we don't have it
#a = Clist2([1, 2, 3])
#print(len(a))
|
[
"pfalcon@users.sourceforge.net"
] |
pfalcon@users.sourceforge.net
|
34a1e201add585aa04483afc9282d5dd3ebcab53
|
60d5ea4f007d49768d250ef394003f554003e4d0
|
/python/Linked List/148.Sort List.py
|
df0485a4e3990534fe5b2bb38f8196871282c2ac
|
[] |
no_license
|
EvanJamesMG/Leetcode
|
dd7771beb119ea1250dbb3b147a09053298cd63b
|
fa638c7fda3802e9f4e0751a2c4c084edf09a441
|
refs/heads/master
| 2021-01-10T17:11:10.896393
| 2017-12-01T16:04:44
| 2017-12-01T16:04:44
| 46,968,756
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,437
|
py
|
# coding=utf-8
'''
Sort a linked list in O(n log n) time using constant space complexity.
'''
# Definition for singly-linked list.
class ListNode(object):
    """One node of a singly linked list: a payload plus a next-pointer."""
    def __init__(self, x):
        # A node starts detached: it holds its value and no successor.
        self.val, self.next = x, None
'''
归并排序,最佳时间复杂度O(n log n) 最坏的时间复杂度O(n log n)
由于题目对时间复杂度和空间复杂度要求比较高,所以查看了各种解法,最好的解法就是归并排序,由于
链表在归并操作时并不需要像数组的归并操作那样分配一个临时数组空间,所以这样就是常数空间复杂度了,当然这里不考虑递归所产生的系统调用的栈。
这里涉及到一个链表常用的操作,即快慢指针的技巧。设置slow和fast指针,
开始它们都指向表头,fast每次走两步,slow每次走一步,fast到链表尾部时,slow正好到中间,这样就将链表截为两段。
'''
class Solution:
    # @param head, a ListNode
    # @return a ListNode
    def merge(self, head1, head2):
        """Merge two already-sorted linked lists; return the merged head.

        Stable: on equal values the node from head1 comes first.
        """
        if head1 == None: return head2
        if head2 == None: return head1
        dummy = ListNode(0) # sentinel head node created just for the merge
        p = dummy
        while head1 and head2:
            if head1.val <= head2.val:
                p.next = head1
                head1 = head1.next
                p = p.next
            else:
                p.next = head2
                head2 = head2.next
                p = p.next
        # Append whichever list still has nodes left.
        if head1 == None:
            p.next = head2
        if head2 == None:
            p.next = head1
        return dummy.next
    def sortList(self, head):
        """Merge-sort a singly linked list in O(n log n) time.

        Uses no auxiliary arrays — the list is split in place — so the only
        extra space is the O(log n) recursion stack.
        """
        if head == None or head.next == None:
            return head
        # Fast/slow pointer trick: when fast reaches the end, slow is at the
        # midpoint, which is where we cut the list in two.
        slow = head; fast = head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        head1 = head
        head2 = slow.next
        slow.next = None # head1 and head2 are now the heads of the two halves
        head1 = self.sortList(head1)
        head2 = self.sortList(head2)
        head = self.merge(head1, head2)
        return head
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
#
# if __name__ == "__main__":
#
# result = Solution().numTrees(3)
# print result
|
[
"Evan123mg@gmail.com"
] |
Evan123mg@gmail.com
|
d8e06bb45fd1f90be90bb45e0c0cc52f227b3187
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-eps/huaweicloudsdkeps/v1/model/link.py
|
a9a92750cec83aea4939f5cad6e9fa7a51be5167
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300
| 2021-05-26T08:54:18
| 2021-05-26T08:54:18
| 370,898,764
| 0
| 0
|
NOASSERTION
| 2021-05-26T03:50:07
| 2021-05-26T03:50:07
| null |
UTF-8
|
Python
| false
| false
| 3,044
|
py
|
# coding: utf-8
import pprint
import re
import six
class Link:
    """Model for a link object (an API URL plus its relation).

    Attributes:
        openapi_types (dict): attribute name -> declared type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    # Attribute names whose scalar values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'href': 'str',
        'rel': 'str'
    }

    attribute_map = {
        'href': 'href',
        'rel': 'rel'
    }

    def __init__(self, href=None, rel=None):
        """Link - a model defined in huaweicloud sdk"""
        self._href = None
        self._rel = None
        self.discriminator = None
        self.href = href
        self.rel = rel

    @property
    def href(self):
        """URL address of the API this link points at.

        :rtype: str
        """
        return self._href

    @href.setter
    def href(self, href):
        """Set the URL address of this Link."""
        self._href = href

    @property
    def rel(self):
        """Relation of the link (e.g. ``self``).

        :rtype: str
        """
        return self._rel

    @rel.setter
    def rel(self, rel):
        """Set the relation of this Link."""
        self._rel = rel

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested models, lists and dicts are converted recursively; scalar
        attributes listed in ``sensitive_list`` are masked (container-valued
        attributes are not masked, matching the generated-SDK behaviour).
        """
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: val.to_dict() if hasattr(val, "to_dict") else val
                                for key, val in value.items()}
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of to_dict()."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two Link instances are equal when all their attributes match."""
        return isinstance(other, Link) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Negation of __eq__."""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
0508b18ea031c12502a6dff30485a63fa71a0660
|
d17a8870ff8ac77b82d0d37e20c85b23aa29ca74
|
/lite/tests/unittest_py/pass/common/test_conv_scale_fuse_pass_base.py
|
a071233f2ff5d5725c9fc9aede18f373c5baff9c
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle-Lite
|
4ab49144073451d38da6f085a8c56822caecd5b2
|
e241420f813bd91f5164f0d9ee0bc44166c0a172
|
refs/heads/develop
| 2023-09-02T05:28:14.017104
| 2023-09-01T10:32:39
| 2023-09-01T10:32:39
| 104,208,128
| 2,545
| 1,041
|
Apache-2.0
| 2023-09-12T06:46:10
| 2017-09-20T11:41:42
|
C++
|
UTF-8
|
Python
| false
| false
| 3,376
|
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
sys.path.append('.')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
from test_conv_util import UpdatePaddingAndDilation, ConvOutputSize
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
def sample_program_configs(draw):
    """Hypothesis strategy: build a random conv2d-followed-by-scale program.

    Draws NCHW input/filter shapes and conv attributes, discards invalid
    combinations via assume(), and returns a ProgramConfig whose two ops
    (conv2d -> scale) are the pattern the conv+scale fuse pass targets.
    """
    in_shape = draw(
        st.lists(
            st.integers(
                min_value=1, max_value=64), min_size=4, max_size=4))
    weight_shape = draw(
        st.lists(
            st.integers(
                min_value=1, max_value=64), min_size=4, max_size=4))
    paddings = draw(st.sampled_from([[1, 2], [4, 2]]))
    dilations = draw(st.sampled_from([[1, 1]]))
    groups = draw(st.sampled_from([1, 2, in_shape[1]]))
    padding_algorithm = draw(st.sampled_from(["VALID", "SAME"]))
    strides = draw(st.sampled_from([[1, 1], [2, 2]]))
    scale = draw(st.floats(min_value=0.5, max_value=5))
    scale_bias = draw(st.floats(min_value=0.0, max_value=1.0))
    # Grouped-conv constraint: input channels == filter channels * groups,
    # and output channels must divide evenly into the groups.
    assume(in_shape[1] == weight_shape[1] * groups)
    assume(weight_shape[0] % groups == 0)
    paddings_, dilations_ = UpdatePaddingAndDilation(
        in_shape, weight_shape, paddings, dilations, groups, padding_algorithm,
        strides)
    out_shape = [in_shape[0], weight_shape[0]]
    oh, ow = ConvOutputSize(in_shape, weight_shape, dilations_, paddings_,
                            strides)
    out_shape = out_shape + [oh, ow]
    # Reject draws whose spatial output would collapse to nothing.
    assume(oh > 0 and ow > 0)
    conv_op = OpConfig(
        type="conv2d",
        inputs={
            "Input": ["input_data"],
            "Filter": ["weight_data"],
            "Bias": ["conv_bias"]
        },
        outputs={"Output": ["conv_output_data"]},
        attrs={
            "data_format": 'nchw',
            "dilations": dilations,
            "padding_algorithm": padding_algorithm,
            "groups": groups,
            "paddings": paddings,
            "strides": strides
        })
    scale_op = OpConfig(
        type="scale",
        inputs={"X": ["conv_output_data"]},
        outputs={"Out": ["output_data"]},
        attrs={"scale": scale,
               "bias": scale_bias,
               "bias_after_scale": True})
    ops = [conv_op, scale_op]
    program_config = ProgramConfig(
        ops=ops,
        weights={
            "conv_bias": TensorConfig(shape=[weight_shape[0]]),
            "weight_data": TensorConfig(shape=weight_shape)
        },
        inputs={"input_data": TensorConfig(shape=in_shape)},
        outputs=["output_data"])
    return program_config
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
fec483ec7ffc645dc6d83b08f1f7592805d9a5fc
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/Jx4mjwEoFdfYuF9ky_10.py
|
a723d6a308abbe962a25372403471fc9bbe9f518
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
"""
Write a function that takes an integer and:
* If the number is a multiple of 3, return `"Hello"`.
* If the number is a multiple of 5, return `"World"`.
* If the number is a multiple of both 3 and 5, return `"Hello World"`.
### Examples
hello_world(3) ➞ "Hello"
hello_world(5) ➞ "World"
hello_world(15) ➞ "Hello World"
### Notes
Don't forget to `return` the result.
"""
def hello_world(num):
    """FizzBuzz-style mapping for a single integer.

    Returns "Hello" for multiples of 3, "World" for multiples of 5,
    "Hello World" for multiples of both, and None otherwise.
    """
    divisible_by_3 = num % 3 == 0
    divisible_by_5 = num % 5 == 0
    if divisible_by_3 and divisible_by_5:
        return "Hello World"
    if divisible_by_5:
        return "World"
    if divisible_by_3:
        return "Hello"
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
3ecf9b834c4eb9b27f4030875f86d478ca91f7a7
|
f8dd1dfb0f81de16b9c8f681c85c6995b63ce037
|
/tensorflow/contrib/estimator/__init__.py
|
6b9f9575b606f1822d760e8597c55994dd8af04c
|
[
"Apache-2.0"
] |
permissive
|
DandelionCN/tensorflow
|
74688926778ae06da1f406967baf6b251b3f3c4e
|
1712002ad02f044f7569224bf465e0ea00e6a6c4
|
refs/heads/master
| 2020-03-06T19:10:37.847848
| 2018-03-27T17:11:49
| 2018-03-27T17:11:49
| 127,022,134
| 1
| 0
|
Apache-2.0
| 2018-03-27T17:24:51
| 2018-03-27T17:24:51
| null |
UTF-8
|
Python
| false
| false
| 2,130
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental utilities re:tf.estimator.*."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.contrib.estimator.python.estimator.dnn import *
from tensorflow.contrib.estimator.python.estimator.dnn_linear_combined import *
from tensorflow.contrib.estimator.python.estimator.extenders import *
from tensorflow.contrib.estimator.python.estimator.head import *
from tensorflow.contrib.estimator.python.estimator.linear import *
from tensorflow.contrib.estimator.python.estimator.logit_fns import *
from tensorflow.contrib.estimator.python.estimator.multi_head import *
from tensorflow.contrib.estimator.python.estimator.replicate_model_fn import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long,wildcard-import
# Names that stay visible in this contrib namespace; everything else pulled
# in by the wildcard imports above is stripped by remove_undocumented().
_allowed_symbols = [
    'add_metrics',
    'binary_classification_head',
    'clip_gradients_by_norm',
    'forward_features',
    'multi_class_head',
    'multi_head',
    'multi_label_head',
    'poisson_regression_head',
    'regression_head',
    'DNNEstimator',
    'DNNLinearCombinedEstimator',
    'LinearEstimator',
    'call_logit_fn',
    'dnn_logit_fn_builder',
    'linear_logit_fn_builder',
    'replicate_model_fn',
    'TowerOptimizer',
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
4d1e52f60ebc4c8d4a60d85a6e0d46289da1a4c4
|
f176975a314b6f8f4c7b931c6057caf20988d12d
|
/problems/uri_2448_postman/uri_2448_postman.py
|
dbf9f4c68510cb044c5e4c8853107f7c203b51a4
|
[] |
no_license
|
fgmacedo/problems_ads
|
4b3226307e66a37fd1848dcc25f3fa6c78567d98
|
d510a9f8788f99c2559efddd54235cb3a134989a
|
refs/heads/main
| 2023-05-20T03:29:57.814018
| 2021-06-15T01:54:56
| 2021-06-15T01:54:56
| 352,163,858
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
#!/usr/bin/env python3
import sys
# URI 2448 (Postman): total walking time to deliver orders, where each move
# costs the absolute distance between house positions on the street.
rl = sys.stdin.readline
rl() # discard n, m header line — sizes are not needed for the computation
# house label -> its position (index) on the street
houses = {x: idx for idx, x in enumerate(rl().split())}
cum_time = 0
current_house_index = 0  # the postman starts at the first house (index 0)
for order in rl().split():
    order_house_index = houses[order]
    cum_time = cum_time + abs(order_house_index - current_house_index)
    current_house_index = order_house_index
sys.stdout.write(f"{cum_time}\n")
|
[
"fgmacedo@gmail.com"
] |
fgmacedo@gmail.com
|
3aa4f597847a981fc4c28f61c442c768e551b919
|
2d3aba0bf1d3a5e018ded78218859b31dd0930dd
|
/3.문자열/str_to_int.py
|
253d92331801b779fa6170d23a73a965f2dfaee0
|
[] |
no_license
|
CS-for-non-CS/Data-Structure
|
7018203de7d14a0be7da2308963082b93fac8e21
|
efce4c13578bd3d143aa570e9317c505b6424c40
|
refs/heads/master
| 2022-12-17T14:04:05.521164
| 2020-09-21T02:18:24
| 2020-09-21T02:18:24
| 297,205,549
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
# Demo 1: string -> number conversions with int() and float().
str1 = "123"
str2 = "12.3"
print(int(str1),type(int(str1))) # 123
print(float(str2),type(float(str2))) # 12.3
# Demo 2: eval()/repr() round-tripping — repr adds quotes, eval evaluates.
str3 = "1+2"
print(str3)
print(repr(str3))
print(eval(str3))
print(eval(repr(str3)))
print(eval(eval(repr(str3))))
# Demo 3: number -> string conversions with str() vs repr().
num1 = 123
num2 = 12.3
print(str(num1),type(str(num1)))
print(repr(num1),type(repr(num1)))
print(str(num2),type(str(num2)))
print(repr(num2),type(repr(num2)))
|
[
"onsy2788@gmail.com"
] |
onsy2788@gmail.com
|
4ffe88ba899c6533dbf898c44501f57ee3a17dcc
|
714b28c006b3c60aa87714f8777a37486b94e995
|
/accounts/migrations/0006_auto_20210522_1401.py
|
992a110f8f0095db91399a635e9b3b4465af91f9
|
[] |
no_license
|
kyrios213/django_tutorial
|
3f0bdce5c0e5faa4f7e08a238ac6d77bba35c92e
|
771d209c4b198df9361254deefd1c9a49c4a0746
|
refs/heads/main
| 2023-04-25T23:11:06.356823
| 2021-05-30T05:31:32
| 2021-05-30T05:31:32
| 368,026,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
# Generated by Django 3.2.3 on 2021-05-22 06:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``Order.note`` and make ``Product.description`` optional."""
    dependencies = [
        ('accounts', '0005_auto_20210519_1035'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='note',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='description',
            field=models.TextField(blank=True, null=True),
        ),
    ]
|
[
"kyrios213@gmail.com"
] |
kyrios213@gmail.com
|
decc0276a133d3ca4d2bfdc0f34fc1ff7ee92055
|
a1730de4b50c17ecd388a995a1526c2eab80cb7d
|
/Plugins/Aspose-Cells-Java-for-Python/setup.py
|
49e93716140f94069b1d526135d2a7a8348415f5
|
[
"MIT"
] |
permissive
|
aspose-cells/Aspose.Cells-for-Java
|
2dcba41fc99b0f4b3c089f2ff1a3bcd32591eea1
|
42d501da827058d07df7399ae104bb2eb88929c3
|
refs/heads/master
| 2023-09-04T21:35:15.198721
| 2023-08-10T09:26:41
| 2023-08-10T09:26:41
| 2,849,714
| 133
| 89
|
MIT
| 2023-03-07T09:39:29
| 2011-11-25T13:16:33
|
Java
|
UTF-8
|
Python
| false
| false
| 705
|
py
|
__author__ = 'fahadadeel'
from setuptools import setup, find_packages
# Packaging metadata for the Aspose.Cells Java-for-Python examples project.
setup(
    name = 'aspose-cells-java-for-python',
    packages = find_packages(),
    version = '1.0',
    description = 'Aspose.cells Java for Python is a project that demonstrates / provides the Aspose.Cells for Java API usage examples in Python.',
    author='Fahad Adeel',
    author_email='cells@aspose.com',
    url='https://github.com/asposecells/Aspose_Cells_Java/tree/master/Plugins/Aspose-Cells-Java-for-Python',
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent'
    ]
)
|
[
"fahadadeel@gmail.com"
] |
fahadadeel@gmail.com
|
d615b760898802dc9155d05c5fee311838b3ece0
|
485be21ebe0a956b7f4a681968e160a463903ecc
|
/KnowledgedRank/BoePRFReranker.py
|
59319910dfd622e0a334dfd716a1ba920c9b8fb2
|
[] |
no_license
|
xiongchenyan/cxPyLib
|
e49da79345006d75a4261a8bbd4cc9a7f730fad2
|
8d87f5a872458d56276a2a2b0533170ede4d5851
|
refs/heads/master
| 2021-01-10T20:43:20.147286
| 2016-01-14T04:02:45
| 2016-01-14T04:02:45
| 17,610,431
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,418
|
py
|
'''
Created on Dec 7, 2015 7:24:56 PM
@author: cx
what I do:
I rerank doc in the BOE space
with simple PRF re-ranking
what's my input:
doc with hEntity
what's my output:
evaluation results
'''
import site
site.addsitedir('/bos/usr0/cx/PyCode/cxPyLib')
from cxBase.base import cxBaseC
from cxBase.Conf import cxConfC
import logging,json
import math
from KnowledgedRank.BoeReranker import *
class BoePRFRerankerC(BoeLmRankerC):
def Init(self):
BoeLmRankerC.Init(self)
self.WOrigQ = 0.5
self.NumOfExpEntity = 20
def SetConf(self, ConfIn):
BoeLmRankerC.SetConf(self, ConfIn)
self.WOrigQ = float(self.conf.GetConf('worigq', self.WOrigQ))
self.NumOfExpEntity = int(self.conf.GetConf('numofexp', self.NumOfExpEntity))
@staticmethod
def ShowConf():
BoeLmRankerC.ShowConf()
print 'worigq 0.5\nnumofexp 20'
def QExp(self,qid,query,lDoc):
hEntityScore = {} #ObjId -> prf score
for doc in lDoc:
if not doc.DocNo in self.hDocKg:
continue
hDocEntity = self.hDocKg[doc.DocNo]
for ObjId,score in hDocEntity.items():
score += doc.score #log(a) + log(b)
if not ObjId in hEntityScore:
hEntityScore[ObjId] = math.exp(score)
else:
hEntityScore[ObjId] += math.exp(score)
lEntityScore = hEntityScore.items()
lEntityScore.sort(key=lambda item:item[1],reverse = True)
lEntityScore = lEntityScore[:self.NumOfExpEntity]
Z = sum([item[1] for item in lEntityScore])
if Z == 0:
lEntityScore = []
else:
lEntityScore = [[item[0],item[1] / float(Z)] for item in lEntityScore]
logging.info(
'[%s][%s] exp entity: %s',
qid,
query,
json.dumps(lEntityScore)
)
return lEntityScore
def RankScoreForDoc(self,lQObjScore,doc):
if not doc.DocNo in self.hDocKg:
return self.Inferencer.MinWeight
hDocEntity = self.hDocKg[doc.DocNo]
score = 0
for ObjId,weight in lQObjScore:
ObjScore = self.Inferencer.inference(ObjId, hDocEntity,doc)
score += ObjScore * weight
# logging.info('[%s] [%s] - [%s] obj score: %f',qid,doc.DocNo,ObjId,ObjScore)
# logging.info('[%s] [%s] ranking score: %f',qid,doc.DocNo,score)
return score
def Rank(self, qid, query, lDoc):
lQObj = []
if qid in self.hQObj:
lQObj = self.hQObj[qid]
lExpEntityScore = self.QExp(qid, query, lDoc)
lQExpObjScore = [[ObjId,self.WOrigQ * score] for ObjId,score in lQObj]
lQExpObjScore += [
[ObjId,score * (1.0 - self.WOrigQ)]
for ObjId,score in lExpEntityScore
]
lScore = [self.RankScoreForDoc(lQExpObjScore, doc) for doc in lDoc]
lMid = zip(lDoc,lScore)
lDocNoScore = [[item[0].DocNo,item[1],item[0].score] for item in lMid]
#sort doc by two keys, if boe scores tie, use original ranking score
lDocNoScore.sort(key=lambda item: (item[1],item[2]), reverse = True)
lRankRes = [item[0] for item in lDocNoScore]
return lRankRes
if __name__=='__main__':
import sys,os
from AdhocEva.RankerEvaluator import RankerEvaluatorC
if 2 != len(sys.argv):
print 'I evaluate Boe exp model '
print 'in\nout'
BoePRFRerankerC.ShowConf()
RankerEvaluatorC.ShowConf()
sys.exit()
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
# ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
conf = cxConfC(sys.argv[1])
QIn = conf.GetConf('in')
EvaOut = conf.GetConf('out')
Ranker = BoePRFRerankerC(sys.argv[1])
Evaluator = RankerEvaluatorC(sys.argv[1])
Evaluator.Evaluate(QIn, Ranker.Rank, EvaOut)
|
[
"xiongchenyan@gmail.com"
] |
xiongchenyan@gmail.com
|
e758759b714c65ed9bcc448e5fe5615004c2826b
|
336d52bb53eb24d09e8433018525fa54aa7f1592
|
/Agents/Actor_Critic_Agents/DDPG.py
|
ad6aa0593f8c9d0c9925aaa9282afb929428cf7d
|
[] |
no_license
|
crashmatt/Deep-Reinforcement-Learning-Algorithms-with-PyTorch
|
8a1901344df0fc499731515cbd53670c77c9c677
|
9c487dc51a483d2130cb9bb2a4d771f9748949cb
|
refs/heads/master
| 2020-05-16T06:20:14.048294
| 2019-04-22T16:38:02
| 2019-04-22T16:38:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,653
|
py
|
import copy
import torch
import torch.nn.functional as functional
from nn_builder.pytorch.NN import NN
from torch import optim
from Base_Agent import Base_Agent
from Replay_Buffer import Replay_Buffer
from Utilities.OU_Noise import OU_Noise
class DDPG(Base_Agent):
"""A DDPG Agent"""
agent_name = "DDPG"
def __init__(self, config):
Base_Agent.__init__(self, config)
self.hyperparameters = config.hyperparameters
self.critic_local = self.create_NN(input_dim=self.state_size + self.action_size, output_dim=1, key_to_use="Critic")
self.critic_target = self.create_NN(input_dim=self.state_size + self.action_size, output_dim=1, key_to_use="Critic")
self.critic_target.load_state_dict(copy.deepcopy(self.critic_local.state_dict()))
self.critic_optimizer = optim.Adam(self.critic_local.parameters(),
lr=self.hyperparameters["Critic"]["learning_rate"])
self.memory = Replay_Buffer(self.hyperparameters["Critic"]["buffer_size"], self.hyperparameters["batch_size"],
self.config.seed)
self.actor_local = self.create_NN(input_dim=self.state_size, output_dim=self.action_size, key_to_use="Actor")
self.actor_target = self.create_NN(input_dim=self.state_size, output_dim=self.action_size, key_to_use="Actor")
self.actor_target.load_state_dict(copy.deepcopy(self.actor_local.state_dict()))
self.actor_optimizer = optim.Adam(self.actor_local.parameters(),
lr=self.hyperparameters["Actor"]["learning_rate"])
self.noise = OU_Noise(self.action_size, self.config.seed, self.hyperparameters["mu"],
self.hyperparameters["theta"], self.hyperparameters["sigma"])
def reset_game(self):
"""Resets the game information so we are ready to play a new episode"""
Base_Agent.reset_game(self)
self.noise.reset()
def step(self):
"""Runs a step in the game"""
while not self.done:
self.action = self.pick_action()
self.conduct_action(self.action)
if self.time_for_critic_and_actor_to_learn():
for _ in range(self.hyperparameters["learning_updates_per_learning_session"]):
states, actions, rewards, next_states, dones = self.memory.sample() # Sample experiences
self.critic_learn(states, actions, rewards, next_states, dones)
self.actor_learn(states)
self.save_experience()
self.state = self.next_state #this is to set the state for the next iteration
self.global_step_number += 1
self.episode_number += 1
def pick_action(self):
"""Picks an action using the actor network and then adds some noise to it to ensure exploration"""
state = torch.from_numpy(self.state).float().unsqueeze(0).to(self.device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
action += self.noise.sample()
return action.squeeze(0)
def critic_learn(self, states, actions, rewards, next_states, dones):
"""Runs a learning iteration for the critic"""
loss = self.compute_loss(states, next_states, rewards, actions, dones)
self.take_optimisation_step(self.critic_optimizer, self.critic_local, loss, self.hyperparameters["Critic"]["gradient_clipping_norm"])
self.soft_update_of_target_network(self.critic_local, self.critic_target, self.hyperparameters["Critic"]["tau"])
def compute_loss(self, states, next_states, rewards, actions, dones):
"""Computes the loss for the critic"""
with torch.no_grad():
critic_targets = self.compute_critic_targets(next_states, rewards, dones)
critic_expected = self.compute_expected_critic_values(states, actions)
loss = functional.mse_loss(critic_expected, critic_targets)
return loss
def compute_critic_targets(self, next_states, rewards, dones):
"""Computes the critic target values to be used in the loss for the critic"""
critic_targets_next = self.compute_critic_values_for_next_states(next_states)
critic_targets = self.compute_critic_values_for_current_states(rewards, critic_targets_next, dones)
return critic_targets
def compute_critic_values_for_next_states(self, next_states):
"""Computes the critic values for next states to be used in the loss for the critic"""
with torch.no_grad():
actions_next = self.actor_target(next_states)
critic_targets_next = self.critic_target(torch.cat((next_states, actions_next), 1))
return critic_targets_next
def compute_critic_values_for_current_states(self, rewards, critic_targets_next, dones):
"""Computes the critic values for current states to be used in the loss for the critic"""
critic_targets_current = rewards + (self.hyperparameters["discount_rate"] * critic_targets_next * (1.0 - dones))
return critic_targets_current
def compute_expected_critic_values(self, states, actions):
"""Computes the expected critic values to be used in the loss for the critic"""
critic_expected = self.critic_local(torch.cat((states, actions), 1))
return critic_expected
def time_for_critic_and_actor_to_learn(self):
"""Returns boolean indicating whether there are enough experiences to learn from and it is time to learn for the
actor and critic"""
return self.enough_experiences_to_learn_from() and self.global_step_number % self.hyperparameters["update_every_n_steps"] == 0
def actor_learn(self, states):
"""Runs a learning iteration for the actor"""
if self.done: #we only update the learning rate at end of each episode
self.update_learning_rate(self.hyperparameters["Actor"]["learning_rate"], self.actor_optimizer)
actor_loss = self.calculate_actor_loss(states)
self.take_optimisation_step(self.actor_optimizer, self.actor_local, actor_loss,
self.hyperparameters["Actor"]["gradient_clipping_norm"])
self.soft_update_of_target_network(self.actor_local, self.actor_target, self.hyperparameters["Actor"]["tau"])
def calculate_actor_loss(self, states):
"""Calculates the loss for the actor"""
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(torch.cat((states, actions_pred), 1)).mean()
return actor_loss
|
[
"p.christodoulou2@gmail.com"
] |
p.christodoulou2@gmail.com
|
71ac3b38241ab179de7aa4edc58a6750b7cb02a3
|
4ddc6604f0c8160c7637d036b835faf974d48556
|
/nova/policies/networks.py
|
a4d065f47d0a291902d07878202cf7f44eb9cdf1
|
[
"Apache-2.0"
] |
permissive
|
tjjh89017/nova
|
a8513a806f24ca0d1c60495fd1f192b7d402b05d
|
49b85bd2e9c77c6e0bd8141b38cd49efa5c06dc2
|
refs/heads/master
| 2021-01-21T10:16:18.970238
| 2017-05-18T10:35:32
| 2017-05-18T10:35:32
| 91,682,422
| 1
| 0
| null | 2017-05-18T10:50:38
| 2017-05-18T10:50:38
| null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-networks'
POLICY_ROOT = 'os_compute_api:os-networks:%s'
networks_policies = [
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ADMIN_API),
policy.RuleDefault(
name=POLICY_ROOT % 'view',
check_str=base.RULE_ADMIN_OR_OWNER),
]
def list_rules():
return networks_policies
|
[
"cbelu@cloudbasesolutions.com"
] |
cbelu@cloudbasesolutions.com
|
0980ec9b29cae8ca8eb4d166d4157dbe4b3c392b
|
4ce5022078c53b3bd75493b12a38237618b52fc8
|
/prodsys/migrations/0068_job_number_of_events.py
|
c18fcdcbc318a34d2627aee7d52bbe11aa900c43
|
[] |
no_license
|
virthead/COMPASS-ProdSys
|
90180e32c3a23d9fd05b252a6f8ded234525a780
|
6dfaa3e9ca40845282d3004ac61f386db5abdbe9
|
refs/heads/master
| 2023-02-23T18:16:02.789709
| 2022-09-28T09:37:59
| 2022-09-28T09:37:59
| 144,685,667
| 0
| 1
| null | 2018-10-13T10:07:42
| 2018-08-14T07:38:34
|
Python
|
UTF-8
|
Python
| false
| false
| 452
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-14 13:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('prodsys', '0067_task_files_source'),
]
operations = [
migrations.AddField(
model_name='job',
name='number_of_events',
field=models.IntegerField(default=0),
),
]
|
[
"root@vm221-123.jinr.ru"
] |
root@vm221-123.jinr.ru
|
afe2e3497fcf2748a39df150b3000ee0cd199b92
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/documentdb/v20210301preview/get_sql_resource_sql_stored_procedure.py
|
ad0982c8a942b15814d0f916e6958ee808ba44f3
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,150
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSqlResourceSqlStoredProcedureResult',
'AwaitableGetSqlResourceSqlStoredProcedureResult',
'get_sql_resource_sql_stored_procedure',
'get_sql_resource_sql_stored_procedure_output',
]
@pulumi.output_type
class GetSqlResourceSqlStoredProcedureResult:
"""
An Azure Cosmos DB storedProcedure.
"""
def __init__(__self__, id=None, identity=None, location=None, name=None, resource=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource and not isinstance(resource, dict):
raise TypeError("Expected argument 'resource' to be a dict")
pulumi.set(__self__, "resource", resource)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the ARM resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
Identity for the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the ARM resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def resource(self) -> Optional['outputs.SqlStoredProcedureGetPropertiesResponseResource']:
return pulumi.get(self, "resource")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
class AwaitableGetSqlResourceSqlStoredProcedureResult(GetSqlResourceSqlStoredProcedureResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSqlResourceSqlStoredProcedureResult(
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
resource=self.resource,
tags=self.tags,
type=self.type)
def get_sql_resource_sql_stored_procedure(account_name: Optional[str] = None,
container_name: Optional[str] = None,
database_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
stored_procedure_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlResourceSqlStoredProcedureResult:
"""
An Azure Cosmos DB storedProcedure.
:param str account_name: Cosmos DB database account name.
:param str container_name: Cosmos DB container name.
:param str database_name: Cosmos DB database name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str stored_procedure_name: Cosmos DB storedProcedure name.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['containerName'] = container_name
__args__['databaseName'] = database_name
__args__['resourceGroupName'] = resource_group_name
__args__['storedProcedureName'] = stored_procedure_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20210301preview:getSqlResourceSqlStoredProcedure', __args__, opts=opts, typ=GetSqlResourceSqlStoredProcedureResult).value
return AwaitableGetSqlResourceSqlStoredProcedureResult(
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
resource=__ret__.resource,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_sql_resource_sql_stored_procedure)
def get_sql_resource_sql_stored_procedure_output(account_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
stored_procedure_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSqlResourceSqlStoredProcedureResult]:
"""
An Azure Cosmos DB storedProcedure.
:param str account_name: Cosmos DB database account name.
:param str container_name: Cosmos DB container name.
:param str database_name: Cosmos DB database name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str stored_procedure_name: Cosmos DB storedProcedure name.
"""
...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
6456afdcfb72444d01ad09e4f851c86cb9b4ddef
|
d3cabb25e9af022fa3ca7818668a3267c16f31ed
|
/queroMeiaWebapp/settings.py
|
1501cd1e7a333c3b286235d56186badea80dcd3e
|
[] |
no_license
|
fafaschiavo/mobileQueroMeiaWebapp
|
6e8df6bdb17ad82b0d1c43a8d78f71e4fd4dccb4
|
83584cf81f7a28b36fa9a699986aaf111d4b3eb5
|
refs/heads/master
| 2021-01-09T20:52:37.544906
| 2016-07-11T16:31:49
| 2016-07-11T16:31:49
| 58,693,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,420
|
py
|
"""
Django settings for queroMeiaWebapp project.
Generated by 'django-admin startproject' using Django 1.10.dev20160307181939.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_gaqx%)0dc8=hd4m5!_v5a4sn)egl1#k21_kqs0*mxz571!zyq'
#Paypal Information
# EMAIL_PAYPAL_ACCOUNT = 'fafaschiavo@msn.com'
# EMAIL_PAYPAL_ACCOUNT = 'judelucca.19@gmail.com'
EMAIL_PAYPAL_ACCOUNT = 'atendimento@queromeia.com'
PRODUCT_ID_1 = 3
PRODUCT_ID_2 = 4
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# PAYPAL_TEST = True
MANDRILL_API_KEY = "PQsvG3uAlMUoboU2fQoGHg"
EMAIL_BACKEND = "djrill.mail.backends.djrill.DjrillBackend"
DEFAULT_FROM_EMAIL = 'atendimento@queromeia.com'
MANDRILL_API_URL = "https://mandrillapp.com/api/1.0"
# Application definition
INSTALLED_APPS = [
'paypal.standard.ipn',
'djrill',
'cinema.apps.CinemaConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'queroMeiaWebapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'queroMeiaWebapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'quero_meia',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '127.0.0.1', # Or an IP Address that your DB is hosted on
'PORT': '3306',
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
# 'NAME': 'quero_meia', # Or path to database file if using sqlite3.
# 'USER': 'fafaschiavo', # Not used with sqlite3.
# 'PASSWORD': '310308Fah!', # Not used with sqlite3.
# 'HOST': 'mysql.queromeia.com', # Set to empty string for localhost. Not used with sqlite3.
# 'PORT': '', # Set to empty string for default. Not used with sqlite3.
# }
# }
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
|
[
"fayschiavo@gmail.com"
] |
fayschiavo@gmail.com
|
9a497a06ee18928dfc7bc17f59d25523f920e47e
|
671067c93d251635ed1360936c7ec84a59ece10c
|
/doublecop.py
|
419ad0c817fd5955ddadc9233606416bb494dcd7
|
[
"BSD-2-Clause"
] |
permissive
|
nd1511/ccw_tutorial_theano
|
48773052ec99da95aa50300399c943834ca29435
|
f92aa8edbb567c9ac09149a382858f841a4a7749
|
refs/heads/master
| 2020-04-03T13:10:35.753232
| 2017-02-01T21:54:14
| 2017-02-01T21:54:14
| 155,276,374
| 1
| 0
|
BSD-2-Clause
| 2018-10-29T20:25:01
| 2018-10-29T20:25:01
| null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
from theano import Apply
from theano.gof import COp
from theano.tensor import as_tensor_variable
class DoubleCOp(COp):
__props__ = ()
def __init__(self):
COp.__init__(self, ["doublecop.c"],
"APPLY_SPECIFIC(doublecop)")
def make_node(self, x):
x = as_tensor_variable(x)
if x.ndim != 1:
raise TypeError("DoubleCOp only works with 1D")
return Apply(self, [x], [x.type()])
def infer_shape(self, input_shapes):
return input_shapes
def grad(self, inputs, g):
return [g[0] * 2]
|
[
"abergeron@gmail.com"
] |
abergeron@gmail.com
|
aa342583e7f64224e167db39abc398760268e22e
|
187ec84de1e03e2fe1e154dcb128b5886b4d0547
|
/chapter_05/exercises/05_alien_colors_3.py
|
bba30d284c7891d8e409c681d5c751e6804d47bc
|
[] |
no_license
|
xerifeazeitona/PCC_Basics
|
fcbc1b8d5bc06e82794cd9ff0061e6ff1a38a64e
|
81195f17e7466c416f97acbf7046d8084829f77b
|
refs/heads/main
| 2023-03-01T07:50:02.317941
| 2021-01-27T21:08:28
| 2021-01-27T21:08:28
| 330,748,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
# 5-5. Alien Colors #3
# Turn your if-else chain from Exercise 5-4 into an if-elif-else chain.
# If the alien is green, print a message that the player earned 5
# points.
alien_color = 'green'
if alien_color == 'green':
print('You just earned 5 points!')
elif alien_color == 'yellow':
print('You just earned 10 points!')
else:
print('You just earned 15 points!')
# If the alien is yellow, print a message that the player earned 10
# points.
alien_color = 'yellow'
if alien_color == 'green':
print('You just earned 5 points!')
elif alien_color == 'yellow':
print('You just earned 10 points!')
else:
print('You just earned 15 points!')
# If the alien is red, print a message that the player earned 15 points.
alien_color = 'red'
if alien_color == 'green':
print('You just earned 5 points!')
elif alien_color == 'yellow':
print('You just earned 10 points!')
else:
print('You just earned 15 points!')
# Write three versions of this program, making sure each message is
# printed for the appropriate color alien.
|
[
"juliano.amaral@gmail.com"
] |
juliano.amaral@gmail.com
|
cf1c95226b738e88e5ece8b394896f8d6b81bf09
|
d806dd4a6791382813d2136283a602207fb4b43c
|
/sirius/blueprints/api/remote_service/tula/passive/hospitalization/xform.py
|
e1eb90b2defe6e898e8d65d353d0acc7a8ea2d35
|
[] |
no_license
|
MarsStirner/sirius
|
5bbf2a03dafb7248db481e13aff63ff989fabbc2
|
8839460726cca080ca8549bacd3a498e519c8f96
|
refs/heads/master
| 2021-03-24T12:09:14.673193
| 2017-06-06T16:28:53
| 2017-06-06T16:28:53
| 96,042,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
#! coding:utf-8
"""
@author: BARS Group
@date: 13.10.2016
"""
from sirius.lib.xform import XForm
from sirius.blueprints.api.remote_service.tula.entities import TulaEntityCode
from sirius.blueprints.api.remote_service.tula.passive.hospitalization.schemas import \
HospitalizationSchema
from sirius.models.system import SystemCode
class HospitalizationTulaXForm(HospitalizationSchema, XForm):
remote_system_code = SystemCode.TULA
entity_code = TulaEntityCode.MEASURE_HOSPITALIZATION
|
[
"paschenko@bars-open.ru"
] |
paschenko@bars-open.ru
|
22c4d7f96a6349a7d19d0b2069f885a37474aa47
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/plotly/py2/plotly/validators/sankey/textfont/__init__.py
|
7a16a4ec501428eb068d80f780f31eca40f57f29
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,471
|
py
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="size", parent_name="sankey.textfont", **kwargs):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="family", parent_name="sankey.textfont", **kwargs):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "style"),
strict=kwargs.pop("strict", True),
**kwargs
)
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="color", parent_name="sankey.textfont", **kwargs):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
[
"robot-piglet@yandex-team.com"
] |
robot-piglet@yandex-team.com
|
a83c3362a529d970c8d74dc9a41e928ad7f6aa12
|
36764bbdbe3dd6bb12cd8eb78e4b8f889bd65af0
|
/mysortmat.py
|
b231fa573b16f020be2aaa0e3b636ee9e073a985
|
[] |
no_license
|
tristaaa/lcproblems
|
18e01da857c16f69d33727fd7dcc821c09149842
|
167a196a9c36f0eaf3d94b07919f4ed138cf4728
|
refs/heads/master
| 2020-05-21T14:38:14.920465
| 2020-02-23T01:49:23
| 2020-02-23T01:49:23
| 186,085,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
class Solution(object):
def mySortMat(self, mat):
"""
sort the input matrix, size of n*n, and the output should be in this order
[[9,8,6],
[7,5,3],
[4,2,1]]
:type mat: List[List[int]]
:rtype: List[List[int]]
"""
n = len(mat)
arr = []
for i in range(n):
arr+=mat[i]
arr.sort(reverse=True)
# print(arr)
result=[[0]*n for i in range(n)]
for i in range(n):
fn=i*(i+1)//2
if i!=n-1:
for j in range(i+1):
result[j][i-j] = arr[fn+j]
result[n-1-j][n-1-i+j] = arr[n*n-1-fn-j]
else:
for j in range(i//2+1):
result[j][i-j] = arr[fn+j]
result[n-1-j][n-1-i+j] = arr[n*n-1-fn-j]
return result
sol=Solution()
mat=[
[ 5, 1, 9, 11],
[ 2, 4, 8, 10],
[13, 3, 6, 7],
[15, 14, 12, 0]
]
mat1=[
[ 5, 1, 9],
[ 2, 4, 8],
[13, 3, 6]
]
print("Given the input matrix: [")
for i in range(len(mat)):
print(mat[i])
print("]")
print("the sorted matrix is: [")
res=sol.mySortMat(mat)
for i in range(len(res)):
print(res[i])
print("]")
print("Given the input matrix: [")
for i in range(len(mat1)):
print(mat1[i])
print("]")
print("the sorted matrix is: [")
res=sol.mySortMat(mat1)
for i in range(len(res)):
print(res[i])
print("]")
|
[
"tristaaa56@126.com"
] |
tristaaa56@126.com
|
131da4ef6887fa5704722436717046f8e50c0a34
|
2f0bde4d37b7ea1aad91ab44b5b4526d0bec30ce
|
/examples/strike-slip-example/okada_driver.py
|
b09fae0728d457d440530d09f1f90b57ca4f9062
|
[
"MIT"
] |
permissive
|
kmaterna/Elastic_stresses_py
|
5c78a628136f610ec68e7ee38d8bc76515319e4f
|
549a13c6c7fa3c80aac9d63548fdbf3b1ec7b082
|
refs/heads/master
| 2023-08-28T21:54:42.500337
| 2023-08-18T01:45:18
| 2023-08-18T01:45:18
| 141,371,162
| 42
| 11
|
MIT
| 2022-08-09T14:22:15
| 2018-07-18T02:37:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
#!/usr/bin/env python
import Elastic_stresses_py.PyCoulomb.fault_slip_object as fso
from Elastic_stresses_py.PyCoulomb import run_dc3d, configure_calc, output_manager, io_additionals
# Definitions
lon0_sys, lat0_sys = -120.5, 36;
bbox = (-121.5, -119.5, 35.2, 36.8);
lonlatfile = "Inputs/lon_lats.txt";
source_slip_dist = "Inputs/s2004PARKFI01CUST.fsp";
# Inputs
parkfield_faults = fso.file_io.io_srcmod.read_srcmod_distribution(source_slip_dist);
coulomb_fault_model = fso.fault_slip_object.fault_object_to_coulomb_fault(parkfield_faults, lon0_sys, lat0_sys);
disp_points = io_additionals.read_disp_points(lonlatfile);
# Configure, Compute, Output
params = configure_calc.configure_default_displacement_params();
inputs = configure_calc.configure_default_displacement_input(coulomb_fault_model, zerolon=lon0_sys,
zerolat=lat0_sys, bbox=bbox, domainsize=100);
outobj = run_dc3d.do_stress_computation(params, inputs, disp_points=disp_points, strain_points=[]);
output_manager.produce_outputs(params, inputs, disp_points, obs_strain_points=[], out_object=outobj);
|
[
"kathrynmaterna@gmail.com"
] |
kathrynmaterna@gmail.com
|
f3e8df6eeb1ec9952a151a19f157255fcab78423
|
1ee9081e345c125eddaa88931197aed0265aafb8
|
/glearn/task_scheduler/__init__.py
|
0dcd17d4997e7fd770ca54277a53d1ef15fe2dca
|
[] |
no_license
|
WeiShiwei/tornado_classify
|
1d45bc16473842fea8d853ba5e2c57a773fed978
|
57faa997c205630c7f84a64db0c2f5ffd8fda12a
|
refs/heads/master
| 2021-01-01T04:44:53.981312
| 2016-05-02T12:06:29
| 2016-05-02T12:06:29
| 57,887,029
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
# -*- coding: utf-8 -*-
from tasks_classify import gldjc, gldzb
IDENTITY_APP_DICT = {
'gldjc':gldjc,
'gldzb':gldzb
}
class TaskScheduler(object):
"""docstring for TaskScheduler"""
def __init__(self, arg):
super(TaskScheduler, self).__init__()
self.arg = arg
@classmethod
def apply_async(self, identity, docs):
# import pdb;pdb.set_trace()
try:
res = IDENTITY_APP_DICT[identity].predict.apply_async( (identity, docs), queue = identity )
except KeyError, e:
print e
res = None
return res
|
[
"weishiwei920@163.com"
] |
weishiwei920@163.com
|
58a0b89d9a44e9b44d96235ba45354df6142d209
|
b15848c78b6ed07d27cae74b90ae99a27d7acf24
|
/DataParser/DataParser/settings.py
|
dae1081c2349a6f3414aead9e32dbee48c5bbd29
|
[
"MIT"
] |
permissive
|
CodeBoss86/DataParser
|
ba988462de6e1cc1ae156e3407fbdea06fa5efc8
|
c9e09f0975145a4ca0a3645699ee91adee49cd2c
|
refs/heads/main
| 2023-01-19T01:51:31.178645
| 2020-11-17T13:38:47
| 2020-11-17T13:38:47
| 316,596,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,872
|
py
|
"""
Django settings for DataParser project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
# from corsheaders.defaults import default_headers
import os
from dotenv import load_dotenv
from pathlib import Path
# from celery.schedules import crontab
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
ENV_PATH = BASE_DIR / '.env'
load_dotenv(ENV_PATH)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('DEBUG')
splittedHosts = os.getenv('ALLOWED_HOSTS').split(',')
ALLOWED_HOSTS = splittedHosts
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
DJANGO_ALLOW_ASYNC_UNSAFE = True
ROOT_URLCONF = 'DataParser.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DataParser.wsgi.application'
ASGI_APPLICATION = 'DataParser.asgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('DB_NAME'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'HOST': os.getenv('DB_HOST'),
'PORT': os.getenv('DB_PORT'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
ASGI_APPLICATION = 'DataParser.routing.application'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = False
|
[
"mattode@outlook.com"
] |
mattode@outlook.com
|
ca34e03ef4a90a8d5d4c34a0ada17be32fc3c867
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/BuildLinks1.10/test_input/CJ_16_1/16_1_1_evan176_solve.py
|
176dc3d014d5ea7ed28dc4e8bea96de713789acf
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 581
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import math
import time
def compute(word):
result = [word[0]]
for i in range(1, len(word)):
alpha = word[i]
if alpha >= result[0]:
result.insert(0, alpha)
else:
result.append(alpha)
return ''.join(result)
if __name__ == "__main__":
with open(sys.argv[1], 'r') as f:
cases = int(f.readline())
for i in range(cases):
word = f.readline().strip()
result = compute(word)
print('Case #{}: {}'.format(i+1, result))
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
62fc3c89e7939ee66309da0c228d3a0ca205b6c6
|
71eb367210e8ffd3b4964a8c99e3ac6f2920fdbb
|
/wedding/management/commands/make_backup.py
|
f92cd208fa723e3a4afbcc78c347424c2bb91e03
|
[
"MIT"
] |
permissive
|
jsayles/wedding
|
392771dc894fb311414b2d34ceb4319318d8eefb
|
242d28d0271d58909b2c5ff5457d909efaecd3c0
|
refs/heads/master
| 2020-04-18T01:26:57.433729
| 2015-09-04T15:18:03
| 2015-09-04T15:18:03
| 28,720,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
import os
import time
import urllib
import sys
import datetime
from django.core.management.base import BaseCommand, CommandError
from wedding.backup import BackupManager
class Command(BaseCommand):
help = "Creates a backup containing an SQL dump and the media files."
args = ""
requires_model_validation = False
def handle(self, *labels, **options):
manager = BackupManager()
print manager.make_backup()
# Copyright 2011 Trevor F. Smith (http://trevor.smith.name/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
[
"jsayles@gmail.com"
] |
jsayles@gmail.com
|
f8b71f47242faeeccc05326262d862d05d57a7fe
|
e7b7cc34f77c71e61aa0fa05bcc62f54fc2fc0e1
|
/BinarySearch/q374_guess_number_higher_or_lower.py
|
000686ff073c0f98c294124c4f8a8ca531d32f01
|
[] |
no_license
|
sevenhe716/LeetCode
|
41d2ef18f5cb317858c9b69d00bcccb743cbdf48
|
4a1747b6497305f3821612d9c358a6795b1690da
|
refs/heads/master
| 2020-03-16T16:12:27.461172
| 2019-04-22T13:27:54
| 2019-04-22T13:27:54
| 130,221,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,562
|
py
|
# Time: O(n)
# Space: O(1)
# 解题思路:
# 二分查找
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
import bisect
pick = 0
def guess(num):
if num == pick:
return 0
elif num > pick:
return -1
else:
return 1
class Solution(object):
def guessNumber(self, n):
"""
:type n: int
:rtype: int
"""
lo, hi = 1, n
while lo <= hi:
mid = lo + (hi - lo) // 2
result = guess(mid)
if result == 0:
return mid
elif result == 1:
lo = mid + 1
else:
hi = mid - 1
return -1
# 三分查找,时间复杂度降为log3(2n)
class Solution1:
def guessNumber(self, n):
"""
:type n: int
:rtype: int
"""
low, high = 1, n
while low <= high:
mid1 = low + (high - low) // 3
mid2 = high - (high - low) // 3
res1, res2 = guess(mid1), guess(mid2)
if res1 == 0:
return mid1
if res2 == 0:
return mid2
elif res1 < 0:
high = mid1 - 1
elif res2 > 0:
low = mid2 + 1
else:
low, high = mid1 + 1, mid2 - 1
return -1
def guessNumber1(self, n):
class C: __getitem__ = lambda _, i: -guess(i)
return bisect.bisect(C(), -1, 1, n)
|
[
"429134862@qq.com"
] |
429134862@qq.com
|
b1347c88770f1eb0a81a06dfaf9e693cbf5b465a
|
b4afd14e3b4e9cff0a99906a69587e348b243aeb
|
/mocc/beida/pythonds/stackop.py
|
424989f76facb6d29739792e959118a1d1b1b7d9
|
[] |
no_license
|
zhankq/pythonlearn
|
d694df23826cda6ba662e852e531e96a10ab2092
|
cb714fbb8257193029f958e73e0f9bd6a68d77f1
|
refs/heads/master
| 2021-12-16T13:51:23.381206
| 2021-12-03T01:13:36
| 2021-12-03T01:13:36
| 205,632,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
class Stack:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self,item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[len(self.items)-1]
def size(self):
return len(self.items)
|
[
"zhankq@163.com"
] |
zhankq@163.com
|
7a81a710368d8388719fd9da8283fa4d6989e5c2
|
d13ee6238418d047f9fe6ddbd5525fd0487d4233
|
/hc/front/tests/test_channels.py
|
1007a821f8abff8784f2b2d318f195c0357cf4d7
|
[
"BSD-3-Clause"
] |
permissive
|
iphoting/healthchecks
|
b4ffb7cd2a254c1a8daa490608ff4d5a96c560da
|
924fc7df60dbf97b82a1f82989507459802f7028
|
refs/heads/heroku
| 2022-03-06T08:32:11.626016
| 2019-10-07T14:37:20
| 2022-02-19T09:37:57
| 82,822,882
| 11
| 7
|
BSD-3-Clause
| 2021-09-28T07:59:39
| 2017-02-22T15:51:02
|
Python
|
UTF-8
|
Python
| false
| false
| 6,130
|
py
|
import json
from hc.api.models import Channel
from hc.test import BaseTestCase
class ChannelsTestCase(BaseTestCase):
def test_it_formats_complex_slack_value(self):
ch = Channel(kind="slack", project=self.project)
ch.value = json.dumps(
{
"ok": True,
"team_name": "foo-team",
"incoming_webhook": {"url": "http://example.org", "channel": "#bar"},
}
)
ch.save()
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertContains(r, "foo-team", status_code=200)
self.assertContains(r, "#bar")
def test_it_shows_webhook_post_data(self):
ch = Channel(kind="webhook", project=self.project)
ch.value = json.dumps(
{
"method_down": "POST",
"url_down": "http://down.example.com",
"body_down": "foobar",
"headers_down": {},
"method_up": "GET",
"url_up": "http://up.example.com",
"body_up": "",
"headers_up": {},
}
)
ch.save()
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertEqual(r.status_code, 200)
# These are inside a modal:
self.assertContains(r, "http://down.example.com")
self.assertContains(r, "http://up.example.com")
self.assertContains(r, "foobar")
def test_it_shows_pushover_details(self):
ch = Channel(kind="po", project=self.project)
ch.value = "fake-key|0"
ch.save()
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertEqual(r.status_code, 200)
self.assertContains(r, "(normal priority)")
def test_it_shows_unconfirmed_email(self):
channel = Channel(project=self.project, kind="email")
channel.value = "alice@example.org"
channel.save()
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertEqual(r.status_code, 200)
self.assertContains(r, "Unconfirmed")
def test_it_shows_down_only_note_for_email(self):
channel = Channel(project=self.project, kind="email")
channel.value = json.dumps(
{"value": "alice@example.org", "up": False, "down": True}
)
channel.save()
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertEqual(r.status_code, 200)
self.assertContains(r, "(down only)")
def test_it_shows_up_only_note_for_email(self):
channel = Channel(project=self.project, kind="email")
channel.value = json.dumps(
{"value": "alice@example.org", "up": True, "down": False}
)
channel.save()
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertEqual(r.status_code, 200)
self.assertContains(r, "(up only)")
def test_it_shows_sms_number(self):
ch = Channel(kind="sms", project=self.project)
ch.value = json.dumps({"value": "+123"})
ch.save()
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertEqual(r.status_code, 200)
self.assertContains(r, "SMS to +123")
def test_it_shows_channel_issues_indicator(self):
Channel.objects.create(kind="sms", project=self.project, last_error="x")
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertContains(r, "broken-channels", status_code=200)
def test_it_hides_actions_from_readonly_users(self):
self.bobs_membership.role = "r"
self.bobs_membership.save()
Channel.objects.create(project=self.project, kind="webhook", value="{}")
self.client.login(username="bob@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertNotContains(r, "Add Integration", status_code=200)
self.assertNotContains(r, "ic-delete")
self.assertNotContains(r, "edit_webhook")
def test_it_shows_down_only_note_for_sms(self):
channel = Channel(project=self.project, kind="sms")
channel.value = json.dumps({"value": "+123123123", "up": False, "down": True})
channel.save()
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertEqual(r.status_code, 200)
self.assertContains(r, "(down only)")
def test_it_shows_up_only_note_for_sms(self):
channel = Channel(project=self.project, kind="sms")
channel.value = json.dumps({"value": "+123123123", "up": True, "down": False})
channel.save()
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertEqual(r.status_code, 200)
self.assertContains(r, "(up only)")
def test_it_shows_disabled_note(self):
ch = Channel(kind="slack", project=self.project)
ch.value = "https://example.org"
ch.disabled = True
ch.save()
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertContains(r, "label-danger", status_code=200)
def test_it_shows_fix_button_for_disabled_email(self):
ch = Channel(kind="email", project=self.project)
ch.value = "bob@example.org"
ch.disabled = True
ch.save()
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.channels_url)
self.assertContains(r, "Fix…", status_code=200)
|
[
"cuu508@gmail.com"
] |
cuu508@gmail.com
|
3cd329b8c34f33fda57e67ec19ffd58aa08cc7d6
|
6044266e775c87afed99397c8bb88366fbbca0e7
|
/scrapy_projt/python_itertools/zip_longest_fillvalue.py
|
b9edce215a1bab2bb5e70645bae16021409cd99a
|
[] |
no_license
|
ranafge/all-documnent-projects
|
e4434b821354076f486639419598fd54039fb5bd
|
c9d65ddea291c53b8e101357547ac63a36406ed9
|
refs/heads/main
| 2023-05-08T20:01:20.343856
| 2021-05-30T10:44:28
| 2021-05-30T10:44:28
| 372,186,355
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
from itertools import zip_longest
import re
from itertools import chain
dates = ['21/11/2044', '31/12/2018', '23/9/3000', '25/12/2007']
text = ['What are dates? ', ', is an example.\n', ', is another format as well.\n',
', also exists, but is a bit ludicrous\n', ', are examples but more commonly used']
print([w for x in zip_longest(text, dates, fillvalue='') for w in x if w])
ls = ['1 Paris-SG 42 20 13 3 4 +33',
'2 Lille 42 20 12 6 2 +20',
'3 Lyon 40 20 11 7 2 +20',
'4 Monaco 36 20 11 3 6 +10']
convert_2d_list = [i.split(maxsplit=2) for i in ls]
print(convert_2d_list)
my_list_dict = {
'L1': ['a', 'b', 'c', 'd'],
'L2': ['e', 'f', 'g', 'h']
}
def check_value_return_key(c):
for k, v in my_list_dict.items():
if c in v:
return k
else:
return None
print(check_value_return_key('g'))
def find_key(c):
for k, v in my_list_dict.items():
if c in v:
return k
else:
raise Exception("value '{}' not found".format(c))
find_key("a")
a = [[[5],[3]],[[4],[5]],[[6],[7]]]
print([list(chain.from_iterable(l)) for l in a])
my_list = [0, 1, 2, 2, 1, 20, 21, 21, 20, 3, 23, 22]
num_map = {j:i for i, j in enumerate(sorted(set(my_list)))}
print(num_map)
|
[
"ranafge@gmail.com"
] |
ranafge@gmail.com
|
b6c69394d9cb24e853932d6a9d1f96608694f81a
|
79b1d3d8ffbda5297fff6fefe2528e303bf2110a
|
/RSGGenFragment/RSToQQ/RSGravitonToQuarkQuark_W-0p25_M_1500_TuneCUETP8M1_13TeV_pythia8_cfi.py
|
6e503b562929e62717577f7d52137212a9732aca
|
[] |
no_license
|
yguler/MCFragments-1
|
25745a043653d02be3a4c242c1a85af221fc34b3
|
7c4d10ee59e00f997221109bf006819fd645b92f
|
refs/heads/master
| 2021-01-13T14:09:12.811554
| 2016-12-11T15:57:37
| 2016-12-11T15:57:37
| 76,184,433
| 0
| 0
| null | 2016-12-11T15:59:22
| 2016-12-11T15:59:22
| null |
UTF-8
|
Python
| false
| false
| 1,323
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(0.00000782),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ExtraDimensionsG*:ffbar2G* = on',
'ExtraDimensionsG*:kappaMG = 2.276101242',
'5100039:m0 = 1500',
'5100039:onMode = off',
'5100039:onIfAny = 1 2 3 4 5'
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"emine.gurpinar@cern.ch"
] |
emine.gurpinar@cern.ch
|
567e938c3da300c10dac470fe7bba73fefa308e1
|
8ca34f6da28f4b2cb2ae7a242e2156581426a950
|
/apps/customer/migrations/0006_remove_job_job_type_remove_job_status.py
|
501fd52e93fbb98072802b9b099caa2cb8297ea6
|
[] |
no_license
|
gray-adeyi/prime
|
7e2360424560beb24742f93aa3f7b3b5cd484150
|
83b728db767e6f1b2237e10400fa95861ce1c8f3
|
refs/heads/main
| 2022-06-17T19:00:52.432315
| 2022-05-19T10:19:56
| 2022-05-19T10:19:56
| 225,469,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
# Generated by Django 4.0.3 on 2022-05-04 10:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('customer', '0005_alter_job_copies'),
]
operations = [
migrations.RemoveField(
model_name='job',
name='job_type',
),
migrations.RemoveField(
model_name='job',
name='status',
),
]
|
[
"adeyigbenga005@gmail.com"
] |
adeyigbenga005@gmail.com
|
b581261136eb5820caa1c37ee4e42eee9145a808
|
32dda10669e459cf37c31f426fa709001d2c75b0
|
/leetcode_cn/solved/pg_709.py
|
3d384ea50d36704b8ae5931bf4436c70958659b5
|
[] |
no_license
|
fastso/learning-python
|
3300f50d06871245d0bfcbe9d201224580f70852
|
d21dbd1b9f31017cdb1ed9b9ffd1e53ffe326572
|
refs/heads/master
| 2023-02-10T14:43:53.726247
| 2023-01-26T10:14:59
| 2023-01-26T10:14:59
| 193,454,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
class Solution:
def toLowerCase(self, s: str) -> str:
l = list(s)
for i in range(len(l)):
o = ord(l[i])
if 64 < o < 91:
o += 32
l[i] = chr(o)
return ''.join(l)
|
[
"fastso.biko@gmail.com"
] |
fastso.biko@gmail.com
|
6e638314f02ee8aa6919f68c5b79ab506004a312
|
df9a467c0d47eafde9bf5d2181347ad00bf53c06
|
/leetcode/most_liked/739_daily_temperatures.py
|
b1783bb29cf96d7abdb26011f592ae371ea26b9f
|
[] |
no_license
|
eunjungchoi/algorithm
|
63d904d92e16ab0917faa585326e9281d61d6000
|
1c9528e26752b723e1d128b020f6c5291ed5ca19
|
refs/heads/master
| 2023-01-06T20:54:06.567512
| 2020-11-14T11:13:05
| 2020-11-14T11:13:05
| 288,323,344
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,646
|
py
|
# Given a list of daily temperatures T, return a list such that, for each day in the input,
# tells you how many days you would have to wait until a warmer temperature.
# If there is no future day for which this is possible, put 0 instead.
#
# For example, given the list of temperatures T = [73, 74, 75, 71, 69, 72, 76, 73],
# your output should be [1, 1, 4, 2, 1, 1, 0, 0].
#
# Note: The length of temperatures will be in the range [1, 30000].
# Each temperature will be an integer in the range [30, 100].
# 매일의 화씨 온도(F) 리스트 T를 받아, 더 따듯한 날씨를 위해서는 며칠을 더 기다려야 하는 지를 출력하라
from typing import List
class Solution:
def dailyTemperatures(self, T: List[int]) -> List[int]:
# 스택값 비교
stack = [0]
results = [0] * len(T)
# 현재의 인덱스를 계속 스택에 쌓아두다가, 이전보다 상승하는 지점에서 현재 온도와 스택에 쌓아둔 인덱스 지점의 온도 차이를 비교해서,
# 더 높다면 스택의 값을 pop으로 꺼내고, 현재 인덱스와 스택에 쌓아둔 인덱스의 차이를 정답으로 처리한다.
for i, temp in enumerate(T):
while stack and temp > T[stack[-1]]:
last = stack.pop()
results[last] = i - last
stack.append(i)
return results
# 37 / 37 test cases passed.
# Status: Accepted
# Runtime: 492 ms
# Memory Usage: 17.2 MB
#
# Your runtime beats 71.54 % of python3 submissions.
# Your memory usage beats 89.19 % of python3 submissions.
# <파이썬 알고리즘 인터뷰> 참고.
|
[
"im.your.energy@gmail.com"
] |
im.your.energy@gmail.com
|
c949fe10046ed1243b9b5b457337815e7cd492b2
|
124df74bce796598d224c4380c60c8e95756f761
|
/pythonPackages/matplotlib/doc/conf.py
|
f5e23c3021a3bf6281ee5318d4e0041ff5fd7269
|
[] |
no_license
|
Mapoet/AWIPS-Test
|
19059bbd401573950995c8cc442ddd45588e6c9f
|
43c5a7cc360b3cbec2ae94cb58594fe247253621
|
refs/heads/master
| 2020-04-17T03:35:57.762513
| 2017-02-06T17:17:58
| 2017-02-06T17:17:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,924
|
py
|
# -*- coding: utf-8 -*-
#
# Matplotlib documentation build configuration file, created by
# sphinx-quickstart on Fri May 2 12:33:25 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# Import support for ipython console session syntax highlighting (lives
# in the sphinxext directory defined above)
import ipython_console_highlighting
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['matplotlib.sphinxext.mathmpl', 'math_symbol_table',
'sphinx.ext.autodoc', 'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive', 'inheritance_diagram',
'gen_gallery', 'gen_rst']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'Matplotlib'
copyright = '2008, John Hunter, Darren Dale, Michael Droettboom'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
import matplotlib
version = matplotlib.__version__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Plot directive configuration
# ----------------------------
plot_formats = ['png', 'hires.png', 'pdf']
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'matplotlib.css'
html_style = 'mpl.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = 'logo.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If nonempty, this is the file name suffix for generated HTML files. The
# default is ``".html"``.
html_file_suffix = '.html'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Custom sidebar templates, maps page names to templates.
html_sidebars = {'index': 'indexsidebar.html',
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'index': 'index.html', 'gallery':'gallery.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.
html_use_opensearch = 'False'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Matplotlibdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '11pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('contents', 'Matplotlib.tex', 'Matplotlib', 'Darren Dale, Michael Droettboom, Eric Firing, John Hunter', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = None
# Additional stuff for the LaTeX preamble.
latex_preamble = """
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{txfonts}
"""
# Documents to append as an appendix to all manuals.
latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = True
latex_use_parts = True
# Show both class-level docstring and __init__ docstring in class
# documentation
autoclass_content = 'both'
|
[
"joshua.t.love@saic.com"
] |
joshua.t.love@saic.com
|
805056a25de493b432d80c6096bb9e9609fc3573
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/EjjBGn7hkmhgxqJej_11.py
|
ad87f45d4681248fbbf11c2febfac2a7ccef7ffa
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
"""
A word nest is created by taking a starting word, and generating a new string
by placing the word _inside_ itself. This process is then repeated.
Nesting 3 times with the word "incredible":
start = incredible
first = incre|incredible|dible
second = increin|incredible|credibledible
third = increinincr|incredible|ediblecredibledible
The final nest is `"increinincrincredibleediblecredibledible"` (depth = 3).
Given a _starting word_ and the _final word nest_ , return the _depth_ of the
word nest.
### Examples
word_nest("floor", "floor") ➞ 0
word_nest("code", "cocodccococodededeodeede") ➞ 5
word_nest("incredible", "increinincrincredibleediblecredibledible") ➞ 3
### Notes
N/A
"""
def word_nest(word, nest,c=0):
if nest == word: return c
else:
nest=nest.replace(word,'')
return word_nest(word,nest,c+1)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
bceec50928f3d2382b8e0575b6918c9538c23f91
|
6bd223ac5bbfe95d45a5f2f052b8b26cf4a4722d
|
/hydrocode/scripts/dump_replayer.py
|
bf86ab19442023e3bed9a08314cbb4866c61ebf3
|
[
"BSD-3-Clause"
] |
permissive
|
ajaykumarr123/software
|
ff2ddf9589571e5ed62f6f1e2325e4553686f436
|
e0b46eed87636afedc9be3a671edf70fc6cc6cb5
|
refs/heads/master
| 2022-04-23T11:36:55.535254
| 2020-04-27T02:16:34
| 2020-04-27T02:18:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
#!/usr/bin/env python3
# Script for replaying raw FPGA data dumps. Read Hydrophones Code wiki entry.
import socket, time, sys
import scipy.io
import numpy

PKT_LEN = 512        # total number of samples in an FPGA packet
NO_CH = 4            # number of channels interleaved in each packet
SAMPL_RATE = 200000  # per-channel sample rate in Hz
ADDR = "127.0.0.1"   # local host because we are sending the data to the same machine
PORT = 8899          # hydromathd listens on this port

# Loading the .mat file whose path is given on the command line.
data = scipy.io.loadmat(sys.argv[1])

# Initializing UDP networking.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

# Sending packets.
for pkt_no in range(len(data["raw_samples_interleaved"]) // PKT_LEN):
    # Forming a packet from the data. 'H' is unsigned 16 bit integer.
    send_buff = data["raw_samples_interleaved"][pkt_no * PKT_LEN : (pkt_no + 1) * PKT_LEN].astype('H')
    # Converting the packet into a bytes payload. The original called
    # payload.tobytes() but discarded the result (dead statement) and sent
    # the numpy array itself; bind the bytes explicitly before sending.
    payload = numpy.asarray(send_buff).tobytes()
    # Sending packet.
    sock.sendto(payload, (ADDR, PORT))
    # Waiting for the amount of time the FPGA would take to send another packet.
    time.sleep(float(PKT_LEN) / float(NO_CH) / float(SAMPL_RATE))
|
[
"leader@cuauv.org"
] |
leader@cuauv.org
|
d5b520dadfbbdd4d46f80f779e68c7bee555ae7c
|
0613b082bd90462e190bc51943356ce6ce990815
|
/baseinfo/forms.py
|
f92317a93a34016d026958d648ff845db9dae301
|
[] |
no_license
|
Hamidnet220/salary
|
1068aac4bc921436c03b627899370a86ca5e99be
|
4dc1f32dfa1d990e6c9f527b4a8d0e1df939262a
|
refs/heads/master
| 2020-05-04T18:09:24.086491
| 2019-04-22T20:22:32
| 2019-04-22T20:22:32
| 179,342,004
| 0
| 1
| null | 2019-04-11T10:43:29
| 2019-04-03T17:53:36
|
Python
|
UTF-8
|
Python
| false
| false
| 6,158
|
py
|
from django import forms
from .models import *
from django.utils.translation import ugettext_lazy as _
class EmployerForm(forms.ModelForm):
    """ModelForm exposing every field of the Employer model."""

    class Meta:
        model= Employer
        fields='__all__'

    def save_record(self):
        # Create a new Employer row from the validated form data.
        Employer.objects.create(**self.cleaned_data)

    def update_record(self,id):
        # Bulk-update the Employer row with the given primary key.
        # NOTE(review): QuerySet.update() bypasses model save() and signals —
        # confirm this is intended.
        Employer.objects.filter(id=id).update(**self.cleaned_data)
class EmployeeStatusForm(forms.Form):
    """Plain form for creating an EmployeeStatus (title + description)."""

    title = forms.CharField(label="عنوان وضعیت کارکنان:",max_length=50)
    description = forms.CharField(label="توضیحات:",widget=forms.Textarea)

    def save_record(self):
        # Persist the validated data as a new EmployeeStatus row.
        EmployeeStatus.objects.create(**self.cleaned_data)
class WorkStatusForm(forms.Form):
    """Plain form for creating a WorkStatus (title + optional description)."""

    title = forms.CharField(label="عنوان وضعیت کاری:",max_length=50)
    description = forms.CharField(label="توضیحات:",widget=forms.Textarea,required=False)

    def save_record(self):
        # Persist the validated data as a new WorkStatus row.
        WorkStatus.objects.create(**self.cleaned_data)
class MaritalStatusForm(forms.Form):
    """Plain form for creating a MaritalStatus (title + optional description)."""

    title = forms.CharField(label="عنوان وضعیت تاهل:",max_length=20)
    description = forms.CharField(label="توضیحات:",widget=forms.Textarea,required=False)

    def save_record(self):
        # Persist the validated data as a new MaritalStatus row.
        MaritalStatus.objects.create(**self.cleaned_data)
class BankForm(forms.Form):
    """Plain form for creating a Bank (name + optional description)."""

    title = forms.CharField(label="نام بانک:",max_length=50)
    description = forms.CharField(label="توضیحات:",required=False,widget=forms.Textarea)

    def save_record(self):
        # Persist the validated data as a new Bank row.
        Bank.objects.create(**self.cleaned_data)
class WorkGroupForm(forms.Form):
    """Plain form for creating a WorkGroup with its benefit amounts."""

    title = forms.CharField(label="عنوان گروه شغلی:",max_length=100)
    # Monetary amounts; two decimal places each.
    child_benefit = forms.DecimalField(label="مبلغ حق اولاد برای یک نفر:",max_digits=50,decimal_places=2)
    dwelling_benefit= forms.DecimalField(label="مبلغ حق مسکن:",max_digits=50,decimal_places=2)
    Bon_benefit = forms.DecimalField(label="مبلغ بن:",max_digits=50,decimal_places=2)

    def save_record(self):
        # Persist the validated data as a new WorkGroup row.
        WorkGroup.objects.create(**self.cleaned_data)
class WorkPlaceForm(forms.Form):
    """Plain form for creating a WorkPlace (title + optional description)."""

    title = forms.CharField(label="عنوان محل کار:",max_length=60)
    description = forms.CharField(label="توضیحات:",required=False,widget=forms.Textarea)

    def save_record(self):
        # Persist the validated data as a new WorkPlace row.
        WorkPlace.objects.create(**self.cleaned_data)
class PostPlaceForm(forms.Form):
    """Plain form for creating a PostPlace (post location) record."""

    title = forms.CharField(label="عنوان محل پست:",max_length=60)
    number_of_employee = forms.IntegerField(label="تعداد نفرات پست")
    post_status = forms.ModelChoiceField(WorkStatus.objects.all(),label="وضعیت پست")
    # NOTE(review): field name "decription" is a typo for "description" —
    # kept as-is because templates/views may reference it by this name.
    decription = forms.CharField(label="توضیحات:",required=False,widget=forms.Textarea)

    def save_record(self):
        # Persist the validated data as a new PostPlace row.
        PostPlace.objects.create(**self.cleaned_data)
class AddMilitarySerStatus(forms.ModelForm):
    """ModelForm exposing every field of the MilitaryServiceStat model."""

    class Meta:
        model=MilitaryServiceStat
        fields= '__all__'

    def save_record(self):
        # Create a new MilitaryServiceStat row from the validated form data.
        MilitaryServiceStat.objects.create(**self.cleaned_data)

    def update_record(self,id):
        # Bulk-update the row with the given primary key.
        # NOTE(review): QuerySet.update() bypasses model save() and signals.
        MilitaryServiceStat.objects.filter(id=id).update(**self.cleaned_data)
class AddCityForm(forms.ModelForm):
    """ModelForm exposing every field of the City model."""

    class Meta:
        model=City
        fields= '__all__'

    def save_record(self):
        # Create a new City row from the validated form data.
        City.objects.create(**self.cleaned_data)

    def update_record(self,id):
        # Bulk-update the row with the given primary key.
        # NOTE(review): QuerySet.update() bypasses model save() and signals.
        City.objects.filter(id=id).update(**self.cleaned_data)
class AddCountryForm(forms.ModelForm):
    """ModelForm exposing every field of the Country model."""

    class Meta:
        model=Country
        fields= '__all__'

    def save_record(self):
        # Create a new Country row from the validated form data.
        Country.objects.create(**self.cleaned_data)

    def update_record(self,id):
        # Bulk-update the row with the given primary key.
        # NOTE(review): QuerySet.update() bypasses model save() and signals.
        Country.objects.filter(id=id).update(**self.cleaned_data)
class EmployeeForm(forms.Form):
    """Plain form for registering a new Employee.

    Field names mirror the Employee model columns so that save_record()
    can forward cleaned_data straight into Employee.objects.create().
    """

    employer = forms.ModelChoiceField(Employer.objects.all(),label="نام کارفرما:")
    firstname = forms.CharField(label="نام:",max_length=50)
    lastname = forms.CharField(label="نام خانوادگی:",max_length=50)
    fathername = forms.CharField(label="نام پدر:",max_length=50)
    national_code = forms.CharField(label="شماره ملی:",max_length=10)
    id_number = forms.CharField(label="شماره شناسنامه:",max_length=10)
    insurance_id = forms.CharField(label="کد بیمه:",max_length=10)
    employee_status = forms.ModelChoiceField(EmployeeStatus.objects.all(),label="وضعیت پرسنل:")
    work_place = forms.ModelChoiceField(WorkPlace.objects.all(),label="محل کار:")
    post_place = forms.ModelChoiceField(PostPlace.objects.all(),label="محل پست:")
    work_status = forms.ModelChoiceField(WorkStatus.objects.all(),label="وضعیت شغلی:")
    marital_status = forms.ModelChoiceField(MaritalStatus.objects.all(),label="وضعیت تاهل:")
    children_count = forms.IntegerField(label="تعداد فرزند")
    work_group = forms.ModelChoiceField(WorkGroup.objects.all(),label="گروه شغلی:")
    # required=False added: a required BooleanField forces the checkbox to be
    # checked, which would make *every* employee tax/insurance exempt before
    # the form could validate.
    tax_exempt = forms.BooleanField(label="معافیت از پرداخت مالیات:",required=False)
    # NOTE(review): field name "indsurence_exempt" is a typo ("insurance") —
    # kept for backward compatibility with existing templates/views.
    indsurence_exempt= forms.BooleanField(label="معافیت از پرداخت بیمه:",required=False)
    tel = forms.CharField(label="تلفن تماس:",max_length=19,required=False)
    mobile = forms.CharField(label="شماره همراه:",max_length=19,required=False)
    # NOTE(review): label "توضسحات" looks like a typo for "توضیحات" — left
    # unchanged because it is a user-visible runtime string.
    description = forms.CharField(label="توضسحات:",required=False,widget=forms.Textarea)

    def save_record(self):
        # Persist the validated data as a new Employee row.
        Employee.objects.create(**self.cleaned_data)
class EmployeeFormModel(forms.ModelForm):
    """ModelForm counterpart of EmployeeForm, used for editing employees."""

    class Meta:
        model=Employee
        fields='__all__'

    def update_record(self,id):
        # Bulk-update the Employee row with the given primary key.
        # NOTE(review): QuerySet.update() bypasses model save() and signals.
        Employee.objects.filter(id=id).update(**self.cleaned_data)
# Constant form
class ConstantForm(forms.ModelForm):
    """ModelForm exposing every field of the Constant model."""

    class Meta:
        model=Constant
        fields="__all__"

    def save_record(self):
        # Create a new Constant row from the validated form data.
        Constant.objects.create(**self.cleaned_data)

    def update_record(self,id):
        # Bulk-update the row with the given primary key.
        # NOTE(review): QuerySet.update() bypasses model save() and signals.
        Constant.objects.filter(id=id).update(**self.cleaned_data)
|
[
"kiani.hamidreza@gmail.com"
] |
kiani.hamidreza@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.