blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dbbe06ceccb250198a84188d034663571ac01ebe
|
d423ac8072d7796ed512a41ee0c4221b0c98325c
|
/web/raw.py
|
62e96e88024a6186cce1802c5c7eb0ac52a40929
|
[
"Apache-2.0"
] |
permissive
|
MagiCircles/frgl
|
fa579e965c451434e85b1bd7b1704869906fce2c
|
f8da4d6ff688862bfef0902e8315160ae1049e0a
|
refs/heads/master
| 2021-05-29T21:15:33.324305
| 2015-11-01T01:15:10
| 2015-11-01T01:15:10
| 42,368,540
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
# -1 means the value is currently unknown
# -2 means the value differs depending on the card
# Per-rarity card progression table, keyed by rarity code (C/R/SR/UR).
# For each rarity:
#   stages:           number of progression stages
#   experiences:      experience threshold at each stage boundary
#   performances:     performance stat at each stage boundary
#   levels:           level cap at each stage boundary
#   max_level_reward: value granted at max level
# NOTE(review): field semantics inferred from key names — confirm against the
# consumers of this table.
cards_data = {
    'C': {
        'stages': 1,
        'experiences': [0, 117, 417],
        'performances': [60, 132, 200],
        'levels': [0, 10, 20],
        'max_level_reward': 10,
    },
    'R': {
        'stages': 2,
        'experiences': [0, 568, 1423, 2858],
        'performances': [66, 210, 258, 299],
        'levels': [0, 20, 30, 40],
        'max_level_reward': 20,
    },
    'SR': {
        'stages': 3,
        'experiences': [0, 1532, 3083, 6800, -1],
        'performances': [72, 273, 316, 368, 400],
        'levels': [0, 30, 40, 55, 70],
        'max_level_reward': 30,
    },
    'UR': {
        'stages': 3,
        'experiences': [0, 3303, -1, -1, -1],
        'performances': [80, 341, -1, -1, -1],
        'levels': [0, 40, 55, 80, 100],
        'max_level_reward': 40,
    },
}
|
[
"db0company@gmail.com"
] |
db0company@gmail.com
|
e2a001631a36104800fc4d40f2e65499ec59a9f7
|
b7546fccec3f417ece54d8fd9da369176f9190a8
|
/yt_auth.py
|
05ffc67d1740ba122cac09613e557a83a5383596
|
[] |
no_license
|
Botmasher/yt-captions-languages
|
327f396c6357fb4ba1a50087b192c838f9e145d9
|
fa71069fabcfac5e6bfb5aaf9dda469b87c38037
|
refs/heads/master
| 2020-06-12T14:12:29.992320
| 2019-07-01T20:44:15
| 2019-07-01T20:44:15
| 194,325,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
# -*- coding: utf-8 -*-
# Instructions for running these code samples locally:
# https://developers.google.com/explorer-help/guides/code_samples#python
import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
# The client secrets JSON is expected to live next to this script.
client_secrets_dir = os.path.dirname(os.path.realpath(__file__))
client_secrets_filename = "client_secrets.json"
def youtube():
    """Run the installed-app OAuth flow and return an authorized YouTube v3 client.

    Loads client secrets from this script's directory and has the user
    complete a console-based OAuth consent flow.
    """
    # Disable OAuthlib's HTTPS verification when running locally.
    # *DO NOT* leave this option enabled in production.
    os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
    api_service_name = "youtube"
    api_version = "v3"
    # Get credentials and create an API client
    flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
        f"{client_secrets_dir}/{client_secrets_filename}",
        scopes
    )
    # NOTE(review): run_console() was removed in google-auth-oauthlib >= 0.5;
    # newer versions use run_local_server() — confirm the pinned version.
    credentials = flow.run_console()
    youtube = googleapiclient.discovery.build(
        api_service_name, api_version, credentials=credentials
    )
    return youtube
|
[
"languagejosh@hotmail.com"
] |
languagejosh@hotmail.com
|
e3c8b16e273d41aa26174d2de74d18df804737f3
|
183e4126b2fdb9c4276a504ff3ace42f4fbcdb16
|
/II семестр/Дискретна математика/Лаби/2016-17/Мазан 7109/2/Algorythms.py
|
4321fec35960e5e539d712a72179c4177fbdbb32
|
[] |
no_license
|
Computer-engineering-FICT/Computer-engineering-FICT
|
ab625e2ca421af8bcaff74f0d37ac1f7d363f203
|
80b64b43d2254e15338060aa4a6d946e8bd43424
|
refs/heads/master
| 2023-08-10T08:02:34.873229
| 2019-06-22T22:06:19
| 2019-06-22T22:06:19
| 193,206,403
| 3
| 0
| null | 2023-07-22T09:01:05
| 2019-06-22T07:41:22
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,467
|
py
|
from Names import names
import networkx as nx
import pylab as plt
from random import sample,randint
class graphs_gen():
    """Builds and draws a two-row directed graph: set A on top, set B below."""
    def __init__(self, A, B, relation):
        self.relation = relation
        self.A = A
        self.B = B
        self.graph = nx.DiGraph()
    def define_graph(self):
        """Populate the digraph plus per-node colors/positions; return the graph."""
        self.graph.add_nodes_from(self.A)
        self.graph.add_nodes_from(self.B)
        self.graph.add_edges_from(self.relation)
        self.color_list = []
        self.pos_list = {}
        # A-nodes: crimson, laid out left-to-right on the upper row (y = 15).
        for index, node in enumerate(self.A):
            self.color_list.append("crimson")
            self.pos_list[node] = (index, 15)
        # B-nodes: orange, laid out on the lower row (y = 10).
        for index, node in enumerate(self.B):
            self.color_list.append("orange")
            self.pos_list[node] = (index, 10)
        return self.graph
    def draw(self):
        """Build the graph and render it with matplotlib."""
        self.define_graph()
        nx.draw(self.graph, self.pos_list, node_size=500, node_color=self.color_list, font_size=10, with_labels=True)
        plt.show()
class algorythms:
    """Relation algebra over sets A and B plus variant-specific 'godmother'
    and 'compadre' relations built from gendered name lists.

    Uses random.sample/randint, so godmother/compadre results vary per run.
    """
    def __init__(self, A:"list", B:"list"):
        self.A = A
        self.B = B
        # Universal relation: the Cartesian product A x B.
        self.universal = self.decart_multiplication()
        # Gendered name pools from the sibling Names module.
        self.male_names = names().male_names
        self.fem_names = names().fem_names
        self.A_males = []
        self.A_females = []
        self.relation_S = []
        self.children = []
        self.godmothers = []
        # Build the variant relations eagerly (compadre depends on godmother).
        self.godmother()
        self.compadre()
    #basic operations
    def decart_multiplication(self):
        # "Decart" = Descartes: the Cartesian product of A and B.
        multiplication = [(i,j)for i in self.A for j in self.B]
        return multiplication
    def union(self,relation_1, relation_2):
        # Set union; note list(set(...)) drops duplicates and ordering.
        return list(set(relation_1).union(set(relation_2)))
    def intersection(self,relation_1, relation_2):
        return list(set(relation_1).intersection(set(relation_2)))
    def difference(self,relation_1, relation_2):
        return list(set(relation_1).difference(set(relation_2)))
    def complement(self,universal,relation):
        # Complement of `relation` with respect to `universal`.
        return list(set(universal).difference(set(relation)))
    def inverted_relation(self,relation):
        # Swap each ordered pair: R^-1 = {(b, a) | (a, b) in R}.
        return [(i[1], i[0]) for i in relation]
    #operations according to my variant
    def godmother(self):
        """Pick a random subset of A's females as godmothers and pair them with B.

        NOTE(review): randint(1, n-1) raises ValueError when the smaller of
        len(A_females)/len(B) is < 2 — confirm inputs always have >= 2 names.
        """
        self.A_males = list(set(self.A).intersection(set(self.male_names)))
        self.A_females = list(set(self.A).intersection(set(self.fem_names)))
        if len(self.A_females)<len(self.B):
            self.godmothers = sample(self.A_females,randint(1,len(self.A_females)-1))
        else:
            self.godmothers = sample(self.A_females,randint(1,len(self.B)-1))
        # zip truncates to the shorter side, so each godmother gets one child.
        self.relation_S = list(zip(self.godmothers,self.B))
        self.children = [i[1] for i in self.relation_S]
        return self.relation_S
    def compadre(self):
        """Relate each godmother to a (mother, father) pair drawn from the
        members of B that were not assigned as children by godmother()."""
        self.unmarked_in_B = list(set(self.B).difference(set(self.children)))
        self.B_males = list(set(self.B).intersection(set(self.male_names)))
        self.B_females = list(set(self.B).intersection(set(self.fem_names)))
        self.fathers = list(set(self.B_males).intersection(self.unmarked_in_B))
        self.mothers = list(set(self.B_females).intersection(self.unmarked_in_B))
        self.pairs = list(zip(self.mothers,self.fathers))
        alpha_R = list(zip(self.godmothers,self.pairs))
        self.relation_R = []
        #alpha_R represents relation of godmother to pair,
        # not relation of godmother to distinct parent
        for i in alpha_R:
            self.relation_R.append((i[0],i[1][0]))
            self.relation_R.append((i[0],i[1][1]))
|
[
"mazanyan027@gmail.com"
] |
mazanyan027@gmail.com
|
df4bd70ae363af51471337895bdf0d0e88de410a
|
e78154abbb8bacf5afccda9da371684cbeabad36
|
/envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/web2/dav/method/put.py
|
17bbf984ea913e62fbe9dbcba0185cc46d7d0d2f
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
enterstudio/popego
|
1a196fabc374c0f45764e5c74bd7752236424040
|
2d09e793d9d2f297139edb325b8a70ddda9b2705
|
refs/heads/master
| 2021-04-09T16:39:40.781634
| 2016-10-14T16:53:47
| 2016-10-14T16:53:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,519
|
py
|
# -*- test-case-name: twisted.web2.dav.test.test_put -*-
##
# Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, wsanchez@apple.com
##
"""
WebDAV PUT method
"""
__all__ = ["preconditions_PUT", "http_PUT"]
from twisted.python import log
from twisted.web2 import responsecode
from twisted.web2.http import HTTPError, StatusResponse
from twisted.web2.dav.fileop import put
def preconditions_PUT(self, request):
    """
    Validate a PUT request before it is handled.

    Rejects a PUT that targets an existing non-regular-file resource
    (FORBIDDEN), a PUT whose parent collection is missing (CONFLICT),
    and a PUT carrying an unrecognized Content-* header
    (NOT_IMPLEMENTED, per RFC 2068 section 9.6).
    """
    exists = self.fp.exists()
    if exists and not self.fp.isfile():
        log.err("Unable to PUT to non-file: %s" % (self.fp.path,))
        raise HTTPError(StatusResponse(
            responsecode.FORBIDDEN,
            "The requested resource exists but is not backed by a regular file."
        ))
    if not exists and not self.fp.parent().isdir():
        log.err("No such directory: %s" % (self.fp.path,))
        raise HTTPError(StatusResponse(
            responsecode.CONFLICT,
            "Parent collection resource does not exist."
        ))
    resource_is_new = not exists  # NOTE(review): unused within this function.
    #
    # HTTP/1.1 (RFC 2068, section 9.6) requires that we respond with a Not
    # Implemented error if we get a Content-* header which we don't
    # recognize and handle properly.
    #
    recognized_content_headers = (
        #"Content-Base",     # Doesn't make sense in PUT?
        #"Content-Encoding", # Requires that we decode it?
        "Content-Language",
        "Content-Length",
        #"Content-Location", # Doesn't make sense in PUT?
        "Content-MD5",
        #"Content-Range",    # FIXME: Need to implement this
        "Content-Type",
    )
    for header, value in request.headers.getAllRawHeaders():
        if header.startswith("Content-") and header not in recognized_content_headers:
            log.err("Client sent unrecognized content header in PUT request: %s"
                    % (header,))
            raise HTTPError(StatusResponse(
                responsecode.NOT_IMPLEMENTED,
                "Unrecognized content header %r in request." % (header,)
            ))
def http_PUT(self, request):
    """
    Respond to a PUT request. (RFC 2518, section 8.7)

    Writes the request body stream to the file backing this resource via
    fileop.put and returns whatever put() produces.
    """
    log.msg("Writing request stream to %s" % (self.fp.path,))
    #
    # Don't pass in the request URI, since PUT isn't specified to be able
    # to return a MULTI_STATUS response, which is WebDAV-specific (and PUT is
    # not).
    #
    return put(request.stream, self.fp)
|
[
"santisiri@gmail.com"
] |
santisiri@gmail.com
|
12ff03f8aed23af658ae9feafae2a89b573e0010
|
0ec51a4bf5013e8601d5fa621f6cde88eeb7b4b5
|
/CUDA_Python/课程3/solutions/col_sums_solution.py
|
be380447aa903d7bf52c47c924b00125a632c2dd
|
[
"MIT"
] |
permissive
|
bobo0810/PytorchNetHub
|
6228b0dd69ab2b22b81867c99262a0b1675759a1
|
15312703bc5bf2e1771de1c96402366fa3e727d4
|
refs/heads/master
| 2023-08-30T14:35:59.369145
| 2023-08-14T03:22:58
| 2023-08-14T03:22:58
| 132,425,167
| 383
| 95
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
@cuda.jit
def col_sums(a, sums, ds):
    """CUDA kernel: each thread sums one column of ``a`` over ``ds`` rows into ``sums``."""
    # One thread per column; the flat grid index selects the column.
    col = cuda.grid(1)
    # NOTE(review): no bounds guard — assumes the launch grid exactly covers
    # the number of columns; confirm at the call site.
    acc = 0.0
    for row in range(ds):
        acc += a[row][col]
    sums[col] = acc
|
[
"1055271769@qq.com"
] |
1055271769@qq.com
|
91ab54c692b193ef15b70f550d25cc8e939e0c69
|
f68cd225b050d11616ad9542dda60288f6eeccff
|
/testscripts/RDKB/component/CMAgent/TS_CMAGENT_GetParamValues.py
|
1f450f59ff0d7bd463c9d4fb7aac9399f70ba71d
|
[
"Apache-2.0"
] |
permissive
|
cablelabs/tools-tdkb
|
18fb98fadcd169fa9000db8865285fbf6ff8dc9d
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
refs/heads/master
| 2020-03-28T03:06:50.595160
| 2018-09-04T11:11:00
| 2018-09-05T00:24:38
| 147,621,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,681
|
py
|
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>9</version>
<name>TS_CMAGENT_GetParamValues</name>
<primitive_test_id/>
<primitive_test_name>CMAgent_Get</primitive_test_name>
<primitive_test_version>2</primitive_test_version>
<status>FREE</status>
<synopsis>TC_CMAGENT_6 :: Get Parameter API Validation for CM Agent</synopsis>
<groups_id>4</groups_id>
<execution_time>2</execution_time>
<long_duration>false</long_duration>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_CMAGENT_5</test_case_id>
<test_objective>To Validate
Get Param Values Function for
CM Agent</test_objective>
<test_type>Positive</test_type>
<test_setup>XB3</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Json Interface:
API Name
TS_CMAGENT_GetParamValues
Input
1.Parameter Path (paramName)( eg: "Device.X_CISCO_COM_CableModem." )</input_parameters>
<automation_approch>1.Configure the Function info in Test Manager GUI which needs to be tested
(TS_CMAGENT_GetParamValues - func name - "If not exists already"
cmagent - module name
Necessary I/P args as Mentioned in Input)
2.Python Script will be generated/overrided automically by Test Manager with provided arguments in configure page (TS_CMAGENT_GetParamValues.py)
3.Execute the generated Script(TS_CMAGENT_GetParamValues.py) using excution page of Test Manager GUI
4.cmagentstub which is a part of TDK Agent process, will be in listening mode to execute TDK Component function named TS_CMAGENT_GetParamValues through registered TDK cmagentstub function along with necessary Path Name as arguments
5.TS_CMAGENT_GetParamValues function will call Ccsp Base Function named "CcspBaseIf_getParameterValues" , that inturn will call CM Agent Library Function "CcspCcMbi_GetParameterValues" along with provided path name
6.Response(s)(printf) from TDK Component,Ccsp Library function and cmagentstub would be logged in Agent Console log based on the debug info redirected to agent console.
7.cmagentstub will validate the available result (from agent console log and Pointer to instance as updated) with expected result (Eg:"Values for Requested Param" along with info) and the same is updated to agent console log.
8.TestManager will publish the result in GUI as PASS/FAILURE based on the response from cmagentstub.</automation_approch>
<except_output>CheckPoint 1:
Parameter values from DUT should be available in Agent Console Log
CheckPoint 2:
TDK agent Test Function will log the test case result as PASS based on API response
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution page</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_CMAGENT_GetParamValues</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
# NOTE(review): Python 2 script template. The <ipaddress>/<port> placeholders
# below are substituted by the Test Manager before execution; the file is not
# parseable as-is.
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("cmagent","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with corresponding Box Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_CMAGENT_GetParamValues');
#Get the result of connection with test component and STB
loadModuleresult =obj.getLoadModuleResult();
print "[LIB LOAD STATUS]  :  %s" %loadModuleresult;
loadStatusExpected = "SUCCESS"
if loadStatusExpected not in loadModuleresult.upper():
    print "[Failed To Load CM Agent Stub from env TDK Path]"
    print "[Exiting the Script]"
    exit();
expectedresult = "SUCCESS";
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('CMAgent_Get');
#Input Parameters
tdkTestObj.addParameter("paramName","Device.X_CISCO_COM_CableModem.");
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
#Get the result of execution
actualresult = tdkTestObj.getResult();
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
resultDetails = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
    #Set the result status of execution as success
    tdkTestObj.setResultStatus("SUCCESS");
else:
    #Set the result status of execution as failure
    tdkTestObj.setResultStatus("FAILURE");
print "[TEST EXECUTION RESULT] : %s" %resultDetails ;
obj.unloadModule("cmagent");
|
[
"jim.lawton@accenture.com"
] |
jim.lawton@accenture.com
|
d1c71a3a09bf1ab60e9939f0fc2e6fa3d0844338
|
10d98fecb882d4c84595364f715f4e8b8309a66f
|
/rouge/test_util.py
|
041dfba5fa55d03239266f5a3c3157d65409c920
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
afcarl/google-research
|
51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42
|
320a49f768cea27200044c0d12f394aa6c795feb
|
refs/heads/master
| 2021-12-02T18:36:03.760434
| 2021-09-30T20:59:01
| 2021-09-30T21:07:02
| 156,725,548
| 1
| 0
|
Apache-2.0
| 2018-11-08T15:13:53
| 2018-11-08T15:13:52
| null |
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utils for ROUGE."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Absolute paths to the ROUGE test fixtures bundled under ./testdata.
_TESTDATA_PREFIX = os.path.join(os.path.dirname(__file__), "testdata")
TARGETS_FILE = os.path.join(_TESTDATA_PREFIX, "target.txt")
PREDICTIONS_FILE = os.path.join(_TESTDATA_PREFIX, "prediction.txt")
LARGE_TARGETS_FILE = os.path.join(_TESTDATA_PREFIX, "target_large.txt")
LARGE_PREDICTIONS_FILE = os.path.join(_TESTDATA_PREFIX, "prediction_large.txt")
DELIMITED_FILE = os.path.join(_TESTDATA_PREFIX, "delimited.txt")
PYROUGE_DIR = os.path.join(_TESTDATA_PREFIX, "pyrouge_files")
def get_text(fname):
  """Return the full text content of the file at *fname*."""
  with open(fname) as input_file:
    contents = input_file.read()
  return contents
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
2825a5aac7cc59f516e43a1498d6e501bcb4853d
|
e0fc7493f4339145792f54bcd7124acea500ca45
|
/cpc/ir/Case.py
|
9e62727bce0a942a4cb172aec455d5c2ebb9d7ae
|
[
"BSD-3-Clause"
] |
permissive
|
U-Ar/Cpresto
|
d52d99e8d44ed01c87c8911614d744cae695d6aa
|
f723458fb237c9e3e8bc8a6afdf7c81858a65363
|
refs/heads/main
| 2023-05-14T15:28:38.449783
| 2021-06-06T15:07:14
| 2021-06-06T15:07:14
| 364,445,894
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
from .IRDumpable import IRDumpable
class Case(IRDumpable):
def __init__(self,value,label):
self._value = value
self._label = label
def dump(self, dumper):
dumper.print_class(self)
dumper.print_member("value",self._value)
dumper.print_member("label",self._label)
|
[
"yuma.arakawa82128awakara.amuy@gmail.com"
] |
yuma.arakawa82128awakara.amuy@gmail.com
|
4f1a576b5379b63b33bfb71ad1943f8617ab700a
|
5eaef75ca4638702e79863fff688394d8afdd7bc
|
/testRunner-res.py
|
4b1bbc797c93a0c1b3d5a3daf1903873be7efbe3
|
[
"MIT"
] |
permissive
|
tejastank/domainThing
|
34045004332947e8ca881b0d0814d1118033b76a
|
28e1d3f6fc23d88a0487fb85a102b80c6a135117
|
refs/heads/master
| 2021-05-30T15:39:39.501489
| 2016-03-17T23:26:42
| 2016-03-17T23:26:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
from domainParser import domainReader
from domainResolver2 import domainResolver
from domainConsumer import domainConsumer
import logging
from multiprocessing import Process, current_process, active_children
if __name__ == "__main__":
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
logger.setLevel(3)
print("Starting to resolve")
resolver = domainResolver()
resolver.singleRun()
print("===============================")
# reader = domainReader('verisign', 1)
# Process(target=reader.getZoneFiles).start()
# reader.getZoneFiles()
# reader.getZoneFiles()
print("===============================")
#consumer = domainConsumer("testing-1")
#consumer.singleRun()
print("DONE")
# active_children()
# exit(0)
|
[
"chrisagallo@gmail.com"
] |
chrisagallo@gmail.com
|
9a975581de93e01be4828e18a85b4e49b68404c0
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/b36bBpsnzyDbd4mzF_0.py
|
370edaffbb2d97407a6bf21f3389a97d8d057256
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
"""
Create a function that calculates the chance of being an imposter. The formula
for the chances of being an imposter is `100 × (i / p)` where `i` is the
imposter count and `p` is the player count. Make sure to round the value to
the nearest integer and return the value as a percentage.
### Examples
imposter_formula(1, 10) ➞ "10%"
imposter_formula(2, 5) ➞ "40%"
imposter_formula(1, 8) ➞ "13%"
### Notes
The player limit is `10` and the imposter count can only go up to `3`.
"""
def imposter_formula(i, p):
return '{:.0%}'.format(i/p)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
df939de128f6f9c89f7761cd09c1e121e032004c
|
37e47457fa95ef39ce18d618c6150bdf3a287161
|
/python/mono_stack/s42_dp.py
|
d5089c39aa7a84d5c89d57c71d0d7572b51e4f6d
|
[] |
no_license
|
TQCAI/Algorithm
|
7e9c05055b02748c92e134861c78a74857346465
|
82a6cfdee5f02d56b884cb65872c3d820146ba7b
|
refs/heads/master
| 2023-03-19T08:42:00.423944
| 2021-03-11T04:10:12
| 2021-03-11T04:10:12
| 305,140,106
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
from typing import List
class Solution:
    def trap(self, height: List[int]) -> int:
        """Return the units of rain water trapped by the elevation map *height*.

        Dynamic-programming approach: precompute, for every interior bar, the
        tallest bar strictly to its left and strictly to its right; the water
        above a bar is bounded by the smaller of the two.
        """
        n = len(height)
        if n < 3:
            # Fewer than three bars can never hold water.
            return 0
        tallest_left = [0] * n
        tallest_right = [0] * n
        for idx in range(1, n - 1):
            tallest_left[idx] = max(tallest_left[idx - 1], height[idx - 1])
        for idx in range(n - 2, 0, -1):
            tallest_right[idx] = max(tallest_right[idx + 1], height[idx + 1])
        total = 0
        for idx in range(1, n - 1):
            water_level = min(tallest_left[idx], tallest_right[idx])
            total += max(0, water_level - height[idx])
        return total
# Example run on the classic elevation map (expected output: 6).
ans = Solution().trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1])
print(ans)
|
[
"1468632627@qq.com"
] |
1468632627@qq.com
|
7b1c9612fb7a17754a7b6b29764c37752e059f84
|
8fe4f275cfc1e4f81addcde950c7d50582ddab6b
|
/SellerUI/migrations/0006_vehicle_bought.py
|
404a56d72ae46afeb490603db671f0b031ad4863
|
[] |
no_license
|
strange-hawk/ResaleValuePrediction
|
5d09b075fcd8367198f02f69efa0402b435b3531
|
c0e6cec56e0c3bcf5dcb7e3b46a7ee9152893382
|
refs/heads/master
| 2022-11-11T04:50:23.610331
| 2020-06-26T17:00:35
| 2020-06-26T17:00:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# Generated by Django 3.0.5 on 2020-06-26 14:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the boolean 'bought' flag (default False) to Vehicle."""
    # Must run after migration 0005 of the SellerUI app.
    dependencies = [
        ('SellerUI', '0005_auto_20200626_1807'),
    ]
    operations = [
        migrations.AddField(
            model_name='vehicle',
            name='bought',
            field=models.BooleanField(default=False),
        ),
    ]
|
[
"tanmay.ambadkar@gmail.com"
] |
tanmay.ambadkar@gmail.com
|
b83fe49dd4b24404a9c5d47f5d730ca86340f1e2
|
ef701a68d9cd1f1f2e3699cc0b98b138eeb11934
|
/job.py
|
f335dbe5a778b9003eb95d2d69d87ff2dc514f8d
|
[] |
no_license
|
yangxuserene/BBSimulator
|
b920344bc8abb78835a4ff5f5a17d516bbb241bd
|
684822d275bb4d180016a9e1a4f4673f7d3755b3
|
refs/heads/master
| 2021-01-17T12:30:47.256223
| 2016-02-23T19:54:02
| 2016-02-23T19:54:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,068
|
py
|
#!/usr/bin/env python
from enum import Enum
import logging
class BBJobTimeStamp(object):
    """Per-job timing statistics across the input, run, and output phases."""
    def __init__(self, submit):
        super(BBJobTimeStamp, self).__init__()
        # Submission time: when the job enters the input queue.
        self.submit = float(submit)
        # Input (stage-in) phase boundaries; finish_in marks entry to run queue.
        self.start_in = 0.0
        self.finish_in = 0.0
        # Run phase boundaries; finish_run marks entry to output queue.
        self.start_run = 0.0
        self.finish_run = 0.0
        # Output (stage-out) phase boundaries.
        self.start_out = 0.0
        self.finish_out = 0.0
class BBJobDemand(object):
    """Resource demand of a job: cores, burst-buffer sizes, and data volumes."""
    def __init__(self, num_core, bb_in, bb, data_out):
        super(BBJobDemand, self).__init__()
        self.num_core = float(num_core)
        self.bb_in = float(bb_in)
        self.bb = float(bb)
        # Additional trace data: input data volume mirrors the input buffer size.
        self.data_in = float(bb_in)
        self.data_out = float(data_out)
    def __str__(self):
        template = "dv = [%d cores, %.2f in_buffer, %.2f buffer, %.2f out_data]"
        return template % (self.num_core, self.bb_in, self.bb, self.data_out)
class BBJobStatus(Enum):
    """Job lifecycle states, in chronological order: a job waits for, then
    performs, stage-in, run, and stage-out, ending in Complete."""
    WaitInput = 1
    Inputing = 2
    WaitRun = 3
    Running = 4
    WaitOutput = 5
    Outputing = 6
    Complete = 7
class BBJob(object):
    """A job with burst-buffer demand, tracked through its queue lifecycle."""
    def __init__(self, job_id, submit, demand, rt):
        super(BBJob, self).__init__()
        self.job_id = job_id
        self.ts = BBJobTimeStamp(submit)
        self.demand = demand
        self.runtime = float(rt)
        # Every job starts out waiting for its input stage.
        self.status = BBJobStatus.WaitInput
    def jobStatus(self):
        """Return a human-readable label for the current status."""
        labels = {
            BBJobStatus.WaitInput: 'Wait Input',
            BBJobStatus.Inputing: 'Inputing',
            BBJobStatus.WaitRun: 'Wait Run',
            BBJobStatus.Running: 'Running',
            BBJobStatus.WaitOutput: 'Wait Out',
            BBJobStatus.Outputing: 'Outputing',
        }
        # Any state not listed above (i.e. Complete) reports 'Complete'.
        return labels.get(self.status, 'Complete')
    def __str__(self):
        return 'job_%d, %s [%s]' % (self.job_id,
                                    self.demand, self.jobStatus())
    def dumpTimeStatistic(self):
        """Return [job_id, submit, waits, phase durations, complete, total_wait,
        response] for a finished job; None when the job is not yet Complete."""
        if self.status != BBJobStatus.Complete:
            return None
        stamps = self.ts
        submit = stamps.submit
        # Time spent queued before each phase.
        waiting_in = stamps.start_in - stamps.submit
        waiting_run = stamps.start_run - stamps.finish_in
        waiting_out = stamps.start_out - stamps.finish_run
        # Duration of each phase itself.
        inputing = stamps.finish_in - stamps.start_in
        running = stamps.finish_run - stamps.start_run
        outputing = stamps.finish_out - stamps.start_out
        complete = stamps.finish_out
        total_wait = waiting_in + waiting_run + waiting_out
        response = complete - submit
        return [self.job_id, submit, waiting_in, inputing,
                waiting_run, running, waiting_out, outputing,
                complete, total_wait, response]
|
[
"littlepretty881203@gmail.com"
] |
littlepretty881203@gmail.com
|
03ce106bae9066ec66a3695dd6a7d4c52c838338
|
d4d7c65bd4a4283b60caf1d8cea69a4aec9e594d
|
/04. Mid Exam Tasks/03. Froggy Squad.py
|
0af494f78e673e0361b1e6c04525249814f6f788
|
[] |
no_license
|
tony-andreev94/Python-Fundamentals
|
db806d05168c1bb61763e5b3d27f495f045e7667
|
32032d37e3a8f70e156db6ccad2d9f25aac62c23
|
refs/heads/master
| 2021-08-07T12:32:27.624459
| 2020-12-24T12:47:05
| 2020-12-24T12:47:05
| 233,568,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,521
|
py
|
# Create a program that helps you keep track of the frogs that are on the riverside.
# Because you are an extreme animal lover, you also name them.
# You will receive the names of the frogs that are already on the riverside on a single line,
# separated by a single space in the following format:
# "{frog1} {frog2} {frog3}… {frogn}"
# Then you will receive commands that describe their action. There are five possible commands:
# Read the initial, space-separated frog names.
frog_names = input().split(' ')
while True:
    command = input().split(' ')
    if command[0] == 'Join':
        # "Join {name}": append the new frog at the end.
        frog_names.append(command[1])
    if command[0] == 'Jump':
        # "Jump {name} {index}": insert the frog at the given position.
        frog_names.insert(int(command[2]), command[1])
    if command[0] == 'Dive':
        # Check index
        # "Dive {index}": remove the frog at that index.
        frog_names.remove(frog_names[int(command[1])])
    if command[0] == 'First':
        # "First {count}": print the first count frogs.
        for index in range(int(command[1])):
            print(frog_names[index], end=" ")
            # NOTE(review): stops early only when index equals the last list
            # position; does not guard against count > len(frog_names) — confirm.
            if index == len(frog_names) - 1:
                break
    if command[0] == 'Last':
        # "Last {count}": print the last count frogs, newest first.
        for index in range(-1, -(int(command[1]) + 1), -1):
            print(frog_names[index], end=" ")
            # NOTE(review): index is negative here, so this comparison is
            # effectively never true for non-trivial lists — verify intent.
            if index == len(frog_names) - 1:
                break
    if command[0] == 'Print':
        # "Print Normal|Reversed": output the roster and end the program.
        print()
        print("Frogs:", end=" ")
        if command[1] == 'Normal':
            for each_frog in frog_names:
                print(each_frog, end=" ")
            break
        if command[1] == 'Reversed':
            for each_frog in reversed(frog_names):
                print(each_frog, end=" ")
            break
|
[
"tony.andreev94@outlook.com"
] |
tony.andreev94@outlook.com
|
f41df66c6e1adf697bf2f3d6c03b719db4aa44b3
|
1f41b828fb652795482cdeaac1a877e2f19c252a
|
/maya_menus/_MAINMENU_PMC_Rigging/19.Selection/08.Select Destination Connection( ctls, attrs ).py
|
f8de72daebe0d35b572f115cc222baf4738737b2
|
[] |
no_license
|
jonntd/mayadev-1
|
e315efe582ea433dcf18d7f1e900920f5590b293
|
f76aeecb592df766d05a4e10fa2c2496f0310ca4
|
refs/heads/master
| 2021-05-02T07:16:17.941007
| 2018-02-05T03:55:12
| 2018-02-05T03:55:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
import pymel.core
# Current Maya selection, in selection order.
sels = pymel.core.ls( sl=1 )
# Attribute names currently highlighted in the main channel box.
# NOTE(review): channelBox returns None when no attributes are highlighted,
# which would make the inner loop below raise TypeError — confirm usage.
selAttrs = pymel.core.channelBox( 'mainChannelBox', q=1, sma=1 )
targets = []
for sel in sels:
    for attr in selAttrs:
        # Collect destination-side (outgoing) connections of each selected attribute.
        targets += sel.attr( attr ).listConnections( s=0, d=1 )
# Replace the selection with the collected destination nodes.
pymel.core.select( targets )
|
[
"kimsung9k@naver.com"
] |
kimsung9k@naver.com
|
55f8bb4cc636189aed727dbdcd2d0918046f96db
|
2d0bada349646b801a69c542407279cc7bc25013
|
/src/vai_quantizer/xnnc4xir/xnnc/proto/tf_pb2/reader_base_pb2.py
|
1578860ab1cd6daaf243152a58058981a8251456
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
Xilinx/Vitis-AI
|
31e664f7adff0958bb7d149883ab9c231efb3541
|
f74ddc6ed086ba949b791626638717e21505dba2
|
refs/heads/master
| 2023-08-31T02:44:51.029166
| 2023-07-27T06:50:28
| 2023-07-27T06:50:28
| 215,649,623
| 1,283
| 683
|
Apache-2.0
| 2023-08-17T09:24:55
| 2019-10-16T21:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 5,019
|
py
|
"""
Copyright 2019 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/reader_base.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="tensorflow/core/framework/reader_base.proto",
package="tensorflow",
syntax="proto3",
serialized_options=_b(
"\n\030org.tensorflow.frameworkB\020ReaderBaseProtosP\001Z=github.com/tensorflow/tensorflow/tensorflow/go/core/framework\370\001\001"
),
serialized_pb=_b(
'\n+tensorflow/core/framework/reader_base.proto\x12\ntensorflow"r\n\x0fReaderBaseState\x12\x14\n\x0cwork_started\x18\x01 \x01(\x03\x12\x15\n\rwork_finished\x18\x02 \x01(\x03\x12\x1c\n\x14num_records_produced\x18\x03 \x01(\x03\x12\x14\n\x0c\x63urrent_work\x18\x04 \x01(\x0c\x42p\n\x18org.tensorflow.frameworkB\x10ReaderBaseProtosP\x01Z=github.com/tensorflow/tensorflow/tensorflow/go/core/framework\xf8\x01\x01\x62\x06proto3'
),
)
_READERBASESTATE = _descriptor.Descriptor(
name="ReaderBaseState",
full_name="tensorflow.ReaderBaseState",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="work_started",
full_name="tensorflow.ReaderBaseState.work_started",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="work_finished",
full_name="tensorflow.ReaderBaseState.work_finished",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="num_records_produced",
full_name="tensorflow.ReaderBaseState.num_records_produced",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="current_work",
full_name="tensorflow.ReaderBaseState.current_work",
index=3,
number=4,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=59,
serialized_end=173,
)
DESCRIPTOR.message_types_by_name["ReaderBaseState"] = _READERBASESTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ReaderBaseState = _reflection.GeneratedProtocolMessageType(
"ReaderBaseState",
(_message.Message,),
dict(
DESCRIPTOR=_READERBASESTATE,
__module__="tensorflow.core.framework.reader_base_pb2"
# @@protoc_insertion_point(class_scope:tensorflow.ReaderBaseState)
),
)
_sym_db.RegisterMessage(ReaderBaseState)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"do-not-reply@gitenterprise.xilinx.com"
] |
do-not-reply@gitenterprise.xilinx.com
|
c17d73e8b28549a190e9b51caacf123d8b11d9ec
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/isis/db.py
|
dfa245b2a4813380fba88996e099bc64cdb6034d
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381
| 2020-01-20T09:18:28
| 2020-01-20T09:18:28
| 235,065,676
| 0
| 0
| null | 2023-05-01T21:19:14
| 2020-01-20T09:36:37
|
Python
|
UTF-8
|
Python
| false
| false
| 5,562
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Db(Mo):
"""
The IS-IS database information.
"""
meta = ClassMeta("cobra.model.isis.Db")
meta.moClassName = "isisDb"
meta.rnFormat = "db-%(type)s"
meta.category = MoCategory.REGULAR
meta.label = "IS-IS Database"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.isis.GrpRec")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.isis.LspRec")
meta.childClasses.add("cobra.model.isis.Nexthop")
meta.childClasses.add("cobra.model.isis.NodeRec")
meta.childClasses.add("cobra.model.isis.Route")
meta.childClasses.add("cobra.model.isis.DTEp")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.isis.NodeRec", "node-"))
meta.childNamesAndRnPrefix.append(("cobra.model.isis.DTEp", "dtep-"))
meta.childNamesAndRnPrefix.append(("cobra.model.isis.GrpRec", "grp-"))
meta.childNamesAndRnPrefix.append(("cobra.model.isis.LspRec", "lsp-"))
meta.childNamesAndRnPrefix.append(("cobra.model.isis.Nexthop", "nh-"))
meta.childNamesAndRnPrefix.append(("cobra.model.isis.Route", "rt-"))
meta.parentClasses.add("cobra.model.isis.Dom")
meta.parentClasses.add("cobra.model.isis.DomLvl")
meta.superClasses.add("cobra.model.l3.Db")
meta.superClasses.add("cobra.model.nw.Conn")
meta.superClasses.add("cobra.model.nw.Db")
meta.superClasses.add("cobra.model.nw.Item")
meta.superClasses.add("cobra.model.nw.GEp")
meta.rnPrefixes = [
('db-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 14494, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 16436, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 128)]
meta.props.add("name", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "type", "type", 3216, PropCategory.REGULAR)
prop.label = "Type"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.defaultValue = 1
prop.defaultValueStr = "ls"
prop._addConstant("dtep", "dynamic-tunnel-endpoint-database", 6)
prop._addConstant("fn", "fabric-node-database", 5)
prop._addConstant("ls", "link-state-dtabase", 1)
prop._addConstant("mcgrp", "multicast-group-database", 4)
prop._addConstant("nh", "nexthop-database", 3)
prop._addConstant("rt", "route-database", 2)
meta.props.add("type", prop)
meta.namingProps.append(getattr(meta.props, "type"))
def __init__(self, parentMoOrDn, type, markDirty=True, **creationProps):
namingVals = [type]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"rrishike@cisco.com"
] |
rrishike@cisco.com
|
ff6b877cf1ff67b550b56f6ef180a404c3b60ca2
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2304/60775/297440.py
|
2a5c35b05a3f2272ea4b464e9c0bc37c673d30cf
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,110
|
py
|
# Read the number of tree nodes and the root index from the first line.
in1 = input().split(' ')
num_v = int(in1[0])
root = int(in1[1])
# tree[i] = [left_child, right_child]; 0 marks "no child".
# NOTE(review): assumes node indices stay below 100 and that 0 is never a
# real child index -- confirm against the input format.
tree = [[] for i in range(100)]
for i in range(num_v):
    in2 = input().split(' ')
    tree[int(in2[0])] = [int(in2[1]),int(in2[2])]

# Breadth-first (level-order) traversal, printing one line per level.
# `stack` is used as a FIFO queue (pop(0)).
stack = []
stack.append(root)
this_layer_nodes = 1
layer = 1
while stack != []:
    next_layer_nodes = 0
    print("Level", layer, ":",end='')
    while this_layer_nodes > 0:
        tmp = stack.pop(0)
        print(" " +str(tmp),end='')
        this_layer_nodes -= 1
        if tree[tmp][0] != 0:
            stack.append(tree[tmp][0])
            next_layer_nodes += 1
        if tree[tmp][1] != 0:
            stack.append(tree[tmp][1])
            next_layer_nodes += 1
    this_layer_nodes = next_layer_nodes
    layer += 1
    print()

# Zigzag traversal: odd levels printed left-to-right, even levels right-to-left.
stack = []
stack.append(root)
this_layer_nodes = 1
layer = 1
while stack != []:
    next_layer_nodes = 0
    if layer % 2 == 1:
        print("Level", layer, "from left to right:", end='')
        while this_layer_nodes > 0:
            tmp = stack.pop(0)
            print(" " +str(tmp),end='')
            this_layer_nodes -= 1
            if tree[tmp][0] != 0:
                stack.append(tree[tmp][0])
                next_layer_nodes += 1
            if tree[tmp][1] != 0:
                stack.append(tree[tmp][1])
                next_layer_nodes += 1
        this_layer_nodes = next_layer_nodes
        layer += 1
    else:
        print("Level", layer, "from right to left:", end='')
        # Buffer the level's nodes into tmp_nodes, then print them reversed.
        # NOTE(review): this block was recovered from unindented source; the
        # placement of the level-advance statements inside the outer while
        # follows the literal line order but looks suspect -- confirm
        # against the original file's indentation.
        while this_layer_nodes > 0 :
            tmp_nodes = []
            while this_layer_nodes > 0:
                tmp = stack.pop(0)
                tmp_nodes.append(tmp)
                this_layer_nodes -= 1
                if tree[tmp][0] != 0:
                    stack.append(tree[tmp][0])
                    next_layer_nodes += 1
                if tree[tmp][1] != 0:
                    stack.append(tree[tmp][1])
                    next_layer_nodes += 1
            this_layer_nodes = next_layer_nodes
            layer += 1
            for i in range(len(tmp_nodes)-1,-1,-1):
                print(' '+str(tmp_nodes[i]),end='')
    print()
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
6db40a61a9d2f970b2620229d396218da4bbba9a
|
e8cc093ce857f65882e25f4bfae94a395ffc2fe5
|
/PESA-BACK/remitapi/utils.py
|
15b7d841ed5d3fec3b88484e78393b7201794401
|
[] |
no_license
|
mandelashaban593/Chatting-App
|
f6d0f7ac3785da690f52a7c1427353956699af4c
|
0e6e5d9edb0a4f0c91a40391ae5916549e87ec7b
|
refs/heads/master
| 2021-01-22T06:01:42.649403
| 2017-03-20T08:08:55
| 2017-03-20T08:08:55
| 92,512,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,063
|
py
|
from remitapi.tasks import send_email, send_sms
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.conf import settings
from django.template import RequestContext
import sys
from datetime import datetime
def mailer(request, subject, template, content, to, sender=False):
    """Render an email template and send it as both HTML and plain text.

    Returns True unconditionally, even when rendering/sending fails
    (errors are printed and swallowed -- delivery is best-effort).
    Python 2 module (uses `except Exception, e`).
    """
    # Global kill switch for all outbound communications.
    if settings.DISABLE_COMMS:
        return True
    if not sender:
        sender = "%s via %s" % (settings.APP_NAME, settings.APP_EMAILS['info'])
    try:
        # Expose the email-assets base URL to the template context.
        content['STATIC_URL'] = "%semail/" % settings.STATIC_URL
        html_content = render_to_string(
            template, content, context_instance=RequestContext(request))
        # this strips the html, so people will have the text as well.
        text_content = strip_tags(html_content)
        # create the email, and attach the HTML version as well.
        send_email(subject, text_content, sender, to, html_content)
    except Exception, e:
        # Best-effort: print the failure and keep going.
        print e
    return True
def send_msg_notification(msg, request):
    """Email the recipient of a new message or note.

    `msg` is expected to expose is_note, recipient.email and
    sender_profile() -- presumably a project model; confirm against callers.
    """
    if msg.is_note:
        template = settings.EMAIL_TEMPLATE_DIR + 'new_note.html'
    else:
        template = settings.EMAIL_TEMPLATE_DIR + 'new_message.html'
    data = {'msg': msg}
    email = False
    subject = "New Message from 199Fix"
    try:
        email = msg.recipient.email
        # NOTE(review): `subject` is always non-empty at this point, so this
        # branch is dead code -- possibly the fixed subject above was meant
        # to be conditional. Confirm intent.
        if not subject:
            subject = '%s via 199Fix' % msg.sender_profile().short_name
    except Exception, e:
        print e
    mailer(request, subject, template, data, email)
def send_job_notification(job, request):
    """Email the job's app owner about a new job or a job status change."""
    # Status '1' appears to select the "new job" template; other statuses
    # get the status-change template. TODO confirm status codes.
    if not job.status == '1':
        template = settings.EMAIL_TEMPLATE_DIR + 'new_job_status.html'
    else:
        template = settings.EMAIL_TEMPLATE_DIR + 'job.html'
    data = {'job': job}
    email = False
    subject = "%s via 199Fix [ %s ]" % (
        job.app.owner_profile().short_name,
        job.name
    )
    try:
        email = job.app.user.email
    except Exception:
        pass
    # Hard-coded fallback recipient when the job has no owner email.
    if not email:
        email = "madradavid@gmail.com"
    mailer(request, subject, template, data, email)
def debug(e, txt=False, log='debug'):
    """Report a debug event to stderr (DEBUG_API mode) or append it to a log file.

    e   -- the object/exception being reported
    txt -- optional extra context text
    log -- log file basename under settings.LOG_DIR
    """
    txt = "%s %s" % (e, txt)
    if settings.DEBUG_API:
        # NOTE(review): `txt` was just formatted above and is never empty,
        # so this guard is dead code.
        if not txt:
            txt = ''
        print >> sys.stderr, 'Debuging____________________ %s' % txt
        print >> sys.stderr, e
    else:
        try:
            # Temporarily redirect stdout into the log file so the print
            # below lands there; restored before the file is closed.
            old_stdout = sys.stdout
            log_file = open("%slogs/%s.log" % (settings.LOG_DIR, log), "a")
            sys.stdout = log_file
            print '%s: Debuging_____________%s' % (
                datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                txt
            )
            sys.stdout = old_stdout
            log_file.close()
        except Exception, e:
            print e
def balance_low_email(request, wallet, transaction):
    '''
    balance is low
    '''
    # Alert a fixed operations address that the given wallet's balance has
    # dropped too low to cover the transaction.
    template = settings.EMAIL_TEMPLATE_DIR + 'balance_low_email.html'
    data = {'transaction': transaction}
    email = "stone@eversend.co.ug"
    subject = "Balance for %s is low" % wallet
    mailer(request, subject, template, data, email)
def error_message(request, msgtype, data={}):
    # Render a canned error message and flash it to the user.
    # NOTE(review): `messages` is not imported in this module as shown --
    # calling this would raise NameError; presumably
    # `from django.contrib import messages` is missing. Confirm.
    # NOTE(review): mutable default `data={}` is shared across calls;
    # `data['type']` mutates it -- safe only because it is overwritten
    # on every call.
    template = settings.BASE_DIR + 'templates/error_messages.html'
    data['type'] = msgtype
    text = render_to_string(
        template, data, context_instance=RequestContext(request))
    messages.error(request, text)


def success_message(request, msgtype, data={}):
    # Success-flavoured counterpart of error_message; same import and
    # mutable-default caveats apply.
    template = settings.BASE_DIR + 'templates/success_messages.html'
    data['type'] = msgtype
    text = render_to_string(
        template, data, context_instance=RequestContext(request))
    messages.success(request, text)
def admin_mail(request, code, data=False, e=False):
    '''admin email template'''
    # Send an internal notification email for the given event `code`; the
    # recipient list depends on DEBUG mode and the event type. Returns the
    # last mailer() result, or False for an unknown code.
    template = settings.EMAIL_TEMPLATE_DIR + 'admin.html'
    # Event code -> email subject. Unknown codes send nothing.
    subjects = {
        'pending_transaction': 'Pending Transaction',
        'complete_transaction': 'Transaction Complete',
        'user_verification': 'User Pending Verification',
        'user_verification_update': 'User Updated Verification Details',
        'new_user': '',
        'rates_error': 'An error occurred while fetching the rates',
        'server_error': 'Dude your App Just Broke',
        'contact_us': 'New Contact Message',
    }
    # NOTE(review): nesting reconstructed from unindented source -- confirm
    # whether the server_error/contact_us overrides really apply only in
    # DEBUG mode, as written here.
    if settings.DEBUG:
        emails = settings.DEBUG_EMAILS
        if code == 'server_error':
            emails = {'madradavid@gmail.com'}
        elif code == 'contact_us':
            emails = {'info@remit.ug'}
    else:
        emails = {'atwine@gmail.com'}
    response = False
    if code in subjects:
        #emails = {'madra@redcore.co.ug'}
        subject = subjects[code]
        extradata = {}
        extradata['data'] = data
        extradata['code'] = code
        # if e:
        #     extradata['e'] = repr(e)
        sender = settings.APP_EMAILS['info']
        # NOTE(review): 'contact_us' is always a key of `subjects`, so this
        # condition is always True; `code == 'contact_us'` looks intended.
        if 'contact_us' in subjects:
            sender = settings.APP_EMAILS['contact_us']
        for email in emails:
            response = mailer(request, subject, template,
                              extradata, email, sender)
    return response
def sendsms(to, template, content):
    '''backward compatibility ,move this to tasks.py'''
    # Thin wrapper kept so old call sites keep working; delegates to the
    # send_sms task.
    return send_sms(to, template, content)
    # return True


# Supported countries: (ISO 3166-1 alpha-2 code, display name).
COUNTRY_CHOICES = (
    ('UG', 'Uganda'),
    ('KE', 'Kenya'),
    ('TZ', 'Tanzania'),
    ('RW', 'Rwanda'),
)

# Supported mobile-money networks: (code, display name).
NETWORK_CHOICES = (
    ('MTN', 'MTN Mobile Money'),
    ('AIRTEL', 'Airtel Money'),
    ('UTL', 'M-Sente'),
)
def render_to_pdf(template_src, context_dict):
    # Render a Django template to a PDF HttpResponse via pisa (xhtml2pdf).
    # NOTE(review): get_template, Context, StringIO, pisa, HttpResponse and
    # escape are not imported in this module as shown -- calling this would
    # raise NameError; confirm the missing imports in the original file.
    template = get_template(template_src)
    context = Context(context_dict)
    html = template.render(context)
    result = StringIO.StringIO()
    #data = html.encode("ISO-8859-1")
    data = html.encode('utf-8')
    pdf = pisa.pisaDocument(StringIO.StringIO(data), result)
    if not pdf.err:
        return HttpResponse(result.getvalue(), content_type='application/pdf')
    return HttpResponse('We had some errors<pre>%s</pre>' % escape(html))


def log_unauthorized_access(request):
    # Record an unauthorized access attempt through the debug logger.
    debug(request, 'log_unauthorized_access')


def insufficient_account_balance(transaction):
    # Placeholder hook -- currently only prints (Python 2 print statement).
    print "insufficient_account_balance"
|
[
"mandelashaban593@gmail.com"
] |
mandelashaban593@gmail.com
|
5f562dcaa59d40c74a34cfe55ed08f30174a169d
|
49185bd5cf7e2f5190ce22b5189a09fe1ab6bb0f
|
/Proper/proper/prop_circular_obscuration.py
|
626faec83727f75711d6b3eb83c90439547c57bc
|
[
"MIT"
] |
permissive
|
RupertDodkins/MEDIS
|
c3f55d8adb6a8c4120593ba6552c9dfe3784d4e2
|
bdb1f00fb93506da2a1f251bc6780e70e97a16c5
|
refs/heads/master
| 2021-07-05T20:06:44.162517
| 2019-09-05T22:16:12
| 2019-09-05T22:16:12
| 160,850,558
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,649
|
py
|
# Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by John Krist
# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri
import proper
def prop_circular_obscuration(wf, radius, xc = 0.0, yc = 0.0, **kwargs):
    """Multiply the wavefront by a circular obscuration (0 inside, 1 outside).

    Parameters
    ----------
    wf : obj
        WaveFront class object; its wfarr is modified in place.
    radius : float
        Radius of the obscuration in meters, unless NORM is set.
    xc, yc : float
        Center of the obscuration relative to the wavefront center
        (defaults 0.0, 0.0).

    Other Parameters
    ----------------
    NORM : bool
        When True, radius and the xc/yc center are interpreted as fractions
        of the current beam radius instead of meters.

    Returns
    -------
    None; the obscuration is applied directly to wf.wfarr.
    """
    # Truthy NORM keyword selects normalized (beam-relative) units.
    norm = bool(kwargs.get("NORM"))
    # A dark ellipse with equal semi-axes is a circular obscuration.
    mask = proper.prop_ellipse(wf, radius, radius, xc, yc, NORM = norm, DARK = True)
    wf.wfarr *= proper.prop_shift_center(mask)
    return
|
[
"rupertdodkins@gmail.com"
] |
rupertdodkins@gmail.com
|
c255da1783f7d52da5f07fbf950c77042766e954
|
0f6a6edcfbfe5669a78748422c7fc0343d532f42
|
/src/events/adapters/celery.py
|
7284036cbe2d0d304bec18de7ae5555363a8f92d
|
[
"MIT"
] |
permissive
|
jonatasoli/the-eye
|
f734f814d0cf12ee157f1e69f95f6e2bf73a7a83
|
0a11334bc24c99dcb7158238bc2ae63a26dc63b1
|
refs/heads/main
| 2023-08-22T06:55:27.893110
| 2021-10-09T14:35:27
| 2021-10-09T14:35:27
| 415,181,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
import celery
from loguru import logger
from flask import Flask
# Module-level Celery application; broker/backend are configured lazily
# by init_app().
celery_obj = celery.Celery(__name__)


def init_app(app: Flask):
    """Bind the Celery app to the Flask app's broker/backend settings and
    make tasks execute inside the Flask application context."""
    logger.info(app.config['CELERY_BROKER_URL'])
    logger.info(app.config['CELERY_RESULT_BACKEND'])
    celery_obj.conf.broker_url = app.config['CELERY_BROKER_URL']
    celery_obj.conf.result_backend = app.config['CELERY_RESULT_BACKEND']
    TaskBase = celery_obj.Task

    class AppContextTask(TaskBase):
        # Wraps task execution in app.app_context() so Flask extensions
        # (DB sessions, config) are available inside workers.
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    # NOTE(review): this assigns to the *celery module* attribute, not to
    # the app instance; `celery_obj.Task = AppContextTask` looks like the
    # intent -- confirm before changing, existing tasks may depend on it.
    celery.Task = AppContextTask
|
[
"contato@jonatasoliveira.me"
] |
contato@jonatasoliveira.me
|
087414c9fb5074b4342290c052f598198db0bc75
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_199/2460.py
|
bb3b758e7f02863f07f01b7e76c0a3ec1bdca277
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
import numpy as np
# Number of test cases (Python 2: raw_input / xrange / print statement).
T = int(raw_input())


def solve():
    """Greedy "pancake flipper": flip a window of K cakes whenever the
    leftmost unfixed cake shows '-'.

    Uses module globals: s (list of '+'/'-' chars, mutated in place) and
    K (flipper width). Returns the flip count, or 'IMPOSSIBLE' if any '-'
    remains after the sweep.
    """
    S = len(s)
    # print s, K, S
    cnt = 0
    # Left-to-right sweep: position ii is only ever fixed by a flip that
    # starts at ii, so the greedy choice is forced.
    for ii in xrange(S-K+1):
        if s[ii] == '-':
            cnt+=1
            for jj in xrange(K):
                s[ii+jj] = '+' if s[ii+jj] == '-' else '-'
    if s.count('-') > 0:
        return 'IMPOSSIBLE'
    else:
        return cnt


for i in xrange(T):
    # Each case: a string of '+'/'-' and the flipper size K.
    s, K = raw_input().split()
    K = int(K)
    s = [c for c in s]
    sol = solve()
    print "Case #%d: %s"%(i+1,sol)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
4d1740d100a8a38bb1f5f3a400475d62046b4555
|
afbaa5685bf737ec7d16fee2bab54ae13caf96f9
|
/geekbang/data_analysis/ch04/numpy3.py
|
7556b5147749407b51b42d1bcc8a0f52d3aa9d31
|
[] |
no_license
|
ykdsg/myPython
|
9dcc9afe6f595e51b72257875d66ada1ba04bba6
|
77d2eaa2acb172664b632cc2720cef62dff8f235
|
refs/heads/master
| 2023-06-10T20:11:08.061075
| 2023-06-03T11:39:53
| 2023-06-03T11:39:53
| 10,655,956
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# Global minimum over all elements.
print(np.amin(a))
# Minimum along axis 0 (one value per column).
print(np.amin(a, 0))
# Minimum along axis 1 (one value per row).
print(np.amin(a, 1))
# Same three reductions for the maximum.
print(np.amax(a))
print(np.amax(a, 0))
print(np.amax(a, 1))
x = np.array([[0, 1], [2, 3]])
# Column sums and row sums.
print(np.sum(x, axis=0))
print(np.sum(x, axis=1))
print(np.amin(x, axis=0))
print(np.amin(x, axis=1))
|
[
"17173as@163.com"
] |
17173as@163.com
|
a30322bdb05ecc03bd4eae8d604f413144cb093d
|
0aea9408dceec38dfa4d52be13256cd84367c15f
|
/my_blog/main/__init__.py
|
2c91492717aac4766ba8b72f7ad0e954e8ee4d4c
|
[] |
no_license
|
ezirmusitua/my-blog-with-flask
|
54d11f4cb0326be5a2dbbebf078282dd68b43e44
|
d7c2bda06d12359261fe25a24527138f437cef41
|
refs/heads/master
| 2021-01-16T21:05:34.267515
| 2016-06-21T11:09:17
| 2016-06-21T11:09:17
| 61,627,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from flask import Blueprint

# Blueprint for the "main" section of the site; views attach routes to it.
main = Blueprint('main', __name__)

# Imported after `main` is defined so views.py can `from . import main`
# without a circular-import failure.
from . import views
|
[
"jferroal@gmail.com"
] |
jferroal@gmail.com
|
8c5ce3513996a713016395c9da9fc122a5f8709a
|
39e1320c74bcf0bbebb855645b4f538e9ef361f4
|
/src/genui/accounts/urls.py
|
6a3bd9d8c8926d8932b1dba59f7fc2685aaf3b8f
|
[
"MIT"
] |
permissive
|
Tontolda/genui
|
4c684e08e78b848e5afa7e4333bbea46c30d9d51
|
c5b7da7c5a99fc16d34878e2170145ac7c8e31c4
|
refs/heads/master
| 2023-04-14T12:57:31.774323
| 2021-01-29T08:01:30
| 2021-01-29T08:01:30
| 344,443,814
| 0
| 0
|
NOASSERTION
| 2021-04-24T14:56:35
| 2021-03-04T11:00:54
| null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
"""
urls
Created by: Martin Sicho
On: 4/30/20, 7:59 PM
"""
from django.urls import path, include, re_path
from django.contrib import admin
from allauth.account.views import ConfirmEmailView
urlpatterns = []

# Route table: Django admin, allauth account pages, and the REST auth API.
urlpatterns += [
    path('admin/', admin.site.urls),
    path('accounts/', include('allauth.urls')),
    # Override allauth's email-confirmation URL so the confirmation key is
    # captured and handed to ConfirmEmailView.
    re_path(r'^accounts/registration/account-confirm-email/(?P<key>[-:\w]+)/$', ConfirmEmailView.as_view(),
        name='account_confirm_email'),
    # NOTE(review): f-string below has no placeholders -- a plain string
    # literal would be equivalent.
    path(f'api/accounts/rfauth/', include('rest_framework.urls')),
    path('api/accounts/', include('rest_auth.urls')),
    path('api/accounts/registration/', include('rest_auth.registration.urls')),
]
|
[
"sicho.martin@gmail.com"
] |
sicho.martin@gmail.com
|
4a4363b17be5d1d57768b191fe9a685a4a95b7a5
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/benchmarks/stdlib-464.py
|
6fd0b1b2c131801d99e2804b1f0a3ef65bd17f03
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
# ChocoPy library functions
def int_to_str(x: int) -> str:
    """Return the decimal string representation of the integer x."""
    # Negative values: emit the sign, then format the magnitude.
    if x < 0:
        return "-" + int_to_str(-x)
    # Higher-order digits first (recursion), then the last digit.
    if x >= 10:
        return int_to_str(x // 10) + "0123456789"[x % 10]
    # Single-digit base case (also covers x == 0).
    return "0123456789"[x]
def str_to_int(x: str) -> int:
    """Parse a decimal integer from x.

    A single leading "-" negates the result. Malformed input (sign after
    the first character, any non-digit character) yields 0, matching the
    function's existing "# Error" convention; the empty string also
    yields 0.
    """
    result: int = 0
    sign: int = 1
    first_char: bool = True
    for char in x:
        if char == "-":
            # A sign is only legal as the very first character.
            if not first_char:
                return 0  # Error
            sign = -1
        elif "0" <= char <= "9":
            # Accumulate the digit. Replaces the original 10-way elif
            # chain, which also contained a duplicated `char == "3"` branch.
            result = result * 10 + (ord(char) - ord("0"))
        else:
            # BUG FIX: the original read `return $INT` -- an unexpanded
            # template placeholder that is a syntax error. Return 0 on
            # error, consistent with the misplaced-sign case above.
            return 0
        first_char = False
    return result * sign
# Input parameters
c: int = 42
n: int = 10

# Run [-nc, nc] with step size c
s: str = ""
i: int = 0
i = -n * c

# Crunch: round-trip every value through int_to_str/str_to_int as a
# smoke test of the conversion pair, printing each value.
while i <= n * c:
    s = int_to_str(i)
    print(s)
    i = str_to_int(s) + c
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
b55369c3e074890c1d761da477257d3372efcd96
|
d2e029233e08ea2b7f806728fb6fdb4313992d1d
|
/Python Fundamentals/for loop.py
|
303b1615818d69f5568318b74b3521dc1fecb333
|
[] |
no_license
|
pvr30/Python-Tutorial
|
f0ccc6c6af2346afc656e5f1f98bae69a58bda6d
|
3c4b968d0e0efbf454fbf9a9f98cd630a288b2d9
|
refs/heads/master
| 2023-06-02T10:08:50.891627
| 2021-06-21T16:36:11
| 2021-06-21T16:36:11
| 378,997,069
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
# for loop
# range(n) yields 0 .. n-1.
for index in range(10):
    print(index)
print("\n")
# range(start, stop) yields start .. stop-1.
for index in range(5,10):
    print(index)
print("\n")
for i in range(1,20,2):
    print(i) # this will print number between 1 to 20 with difference of 2
# Print list.
name = ["Harsh","Sanjay","Sahil","Manthan"]
for index in name:
    print(index)
# Print list of dictionary .
student_grade = [
    {"name": "Harsh", "grade": 100},
    {"name": "Sahil", "grade": 80},
    {"name": "Manthan", "grade": 10},
]
# NOTE(review): `name` below rebinds the list variable of the same name
# defined above -- harmless here, but confusing in a tutorial.
for student in student_grade:
    name = student["name"]
    grade = student["grade"]
    print(f"{name} got {grade} marks in Exam.")
|
[
"vishalparmar6958@gmail.com"
] |
vishalparmar6958@gmail.com
|
3cb38ba06488f204e037b03a48de04afff964cc6
|
ddefb1ad0ba5939b66e3f5e8fae5fb8a88f03d4a
|
/四级/四级.py
|
56a7d411d403c8ba85fa9f1d76dcb5e3d96676c2
|
[] |
no_license
|
github3332422/case
|
45ea6a8f7528affa9b724bb31088c8c8c9f0cdf2
|
fb86da7f94c8ce24f142e1d34d2cec1f6e21a81f
|
refs/heads/master
| 2020-05-19T13:28:37.379740
| 2019-05-05T14:15:49
| 2019-05-05T14:15:49
| 185,039,295
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,095
|
py
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Time:2018/8/24 9:37
# @Author: wardseptember
# @File: CountHighFrequencyWords.py
import re
# Stop list of common words (and OCR/exam-sheet artifacts) to skip when
# counting word frequencies.
excludes = ['the', 'of', 'to', 'and', 'in', 'a', 'is', 'were', 'was', 'you',
    'I', 'he', 'his', 'there', 'those', 'she', 'her', 'their',
    'that', '[a]', '[b]', '[c]', '[d]', 'them', 'or','for','as',
    'are','on','it','be','with','by','have','from','not','they',
    'more','but','an','at','we','has','can','this','your','which','will',
    'one','should','points)','________','________.','all','than','what',
    'people','if','been','its','new','our','would','part','may','some','i',
    'who','answer','when','most','so','section','no','into','do','only',
    'each','other','following','had','such','much','out','--','up','these',
    'even','how','directions:','use','because','(10','time','(15','[d].',
    '-','it.','[b],','[a],','however,','1','c','1.','2.','b','d','a','(10',
    '2','12.','13.','29.','3.','4.','5.','6.','7.','8.','9.','10.','11.','14.',
    '15.']
# Originally: "filter out simple words yourself; too many to list them all."
def getTxt():
    """Read the corpus file, lowercase it, and blank out punctuation.

    Returns the cleaned text as a single string ready for .split().
    """
    # Context manager closes the file deterministically (original left it open).
    with open('86_17_1_2.txt') as f:
        txt = f.read()
    txt = txt.lower()
    for ch in '!"@#$%^&*()+,-./:;<=>?@[]_`~{|}':  # replace special characters
        # BUG FIX: str.replace returns a NEW string; the original call
        # discarded the result, so punctuation was never removed.
        txt = txt.replace(ch, ' ')
    return txt
# 1. Get the cleaned, lower-cased corpus text.
EngTxt = getTxt()
# 2. Split into a list of whitespace-separated tokens.
txtArr = EngTxt.split()
# 3. Count occurrences of every word not in the `excludes` stop list.
# NOTE(review): the inner scan is O(len(excludes)) per word; an equivalent
# `if word not in excludes` (or a set lookup) would be simpler and faster.
counts = {}
for word in txtArr:
    flag=True
    for word1 in excludes:
        if word==word1:
            flag=False
        else:
            continue
    if flag is True:
        counts[word] = counts.get(word, 0) + 1
    else:
        continue
# 4. Convert the dict to a list of (word, count) pairs for sorting/printing.
countsList = list(counts.items())
countsList.sort(key=lambda x:x[1], reverse=True)# sort by count, descending
# 5. Append the ranking to the output file, one "word : N次" line per entry.
for word,count in countsList:
    with open('output_3.txt','a+') as f:
        str1=word+' : '+str(count)+ '次'
        f.writelines(str1+'\n')
    # NOTE(review): redundant -- the `with` block has already closed the file.
    f.close()
#print('{0:<10}{1:>5}'.format(word,count))
|
[
"mail.zhangqing@gmail.com"
] |
mail.zhangqing@gmail.com
|
dec8f4a34c5465e586dbdbcddfbb3d2c85cc27bc
|
8f81a0ff4a5bef52bdc23decfdd21183040451f1
|
/docs/conf.py
|
e411946184148cfbcbb42fbc600b02373eb15ba1
|
[
"MIT"
] |
permissive
|
shimw6828/sramongo
|
22faad838376a2b264cb003c937522b72b67d6d0
|
7b73a2a71c9c12160da7d0aaa1537a615a356136
|
refs/heads/master
| 2022-11-14T18:33:17.122167
| 2020-07-02T19:42:09
| 2020-07-02T19:42:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,163
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sramongo documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 5 14:49:04 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
"""Sphinx configuration for building the sramongo documentation."""
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.intersphinx',
              'sphinx.ext.todo',
              'sphinx.ext.viewcode',
              'sphinx.ext.githubpages',
              'sphinx.ext.napoleon',
              ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'sramongo'
copyright = '2017, Justin Fear'
author = 'Justin Fear'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `_todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# NOTE: third-party theme; imported here (mid-file) so the theme path can be
# resolved from the installed package.
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'sramongodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'sramongo.tex', 'sramongo Documentation',
     'Justin Fear', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'sramongo', 'sramongo Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'sramongo', 'sramongo Documentation',
     author, 'sramongo', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Options for Napoleon ----------------------------------------------
# Napoleon parses both Google- and NumPy-style docstrings into reST.
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Example configuration for intersphinx: refer to the Python standard library.
# FIX: the old positional form {'url': None} is deprecated and rejected by
# modern Sphinx; mappings must be named: {name: (target, inventory)}.
intersphinx_mapping = {'python': ('https://docs.python.org/', None)}
|
[
"justin.m.fear@gmail.com"
] |
justin.m.fear@gmail.com
|
15ef14fad6965eb4f3742e852d29527029f1f162
|
09df68c721f724bdfa1d00f18d5ee6fffba548df
|
/NK_25.py
|
f53bf49e6d6c9a353293f13fe6d37c671183f4ed
|
[] |
no_license
|
NeilWangziyu/JZOffer
|
c8cde80cf54545244ebeb3a9fc88d5ac6b832622
|
ce29ea836bd20841d69972180273e4d4ec11514d
|
refs/heads/master
| 2020-04-23T06:19:48.434829
| 2019-09-25T12:35:38
| 2019-09-25T12:35:38
| 170,969,684
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
# Read two space-separated integers: n = upper bound of the range 1..n,
# m = 1-based rank of the wanted number in lexicographic order.
n,m = list(map(int,input().split()))
def getChildCount(pre, mx):
    """Count integers in [1, mx] whose decimal representation starts with
    *pre* — i.e. the size of the subtree rooted at *pre* in the 10-ary
    lexicographic tree of 1..mx (including *pre* itself).
    """
    count = 1  # pre itself
    lo, hi = pre * 10, pre * 10 + 9  # first child level: pre0 .. pre9
    while lo <= mx:
        # Either the whole level fits below mx, or only its prefix does.
        count += (hi - lo + 1) if hi <= mx else (mx - lo + 1)
        lo *= 10
        hi = hi * 10 + 9
    return count
def find(m, mx):
    """Return the m-th number (1-based) in the lexicographic ordering of 1..mx.

    Walks the 10-ary prefix tree: skip whole sibling subtrees while the
    target rank lies beyond them, otherwise descend to the first child.
    """
    cur = 1
    remaining = m
    while remaining != 0:
        subtree = getChildCount(cur, mx)
        if subtree < remaining:
            # Target is past this subtree: jump to the next sibling.
            remaining -= subtree
            cur += 1
        elif remaining == 1:
            break  # cur itself is the answer
        else:
            # Target is inside this subtree: descend one level.
            remaining -= 1
            cur *= 10
    return cur
print(find(m,n))
# Find the number at a specific position in the lexicographic ordering
# of 1..n.
# http://www.lyqhahaha.xyz/P/64/
|
[
"17210720158@fudan.edu.cn"
] |
17210720158@fudan.edu.cn
|
73c1a9a4086a951478d1b79d38eca31ad7308602
|
84046429e40890d1c0e1064726db0cf359dbadcf
|
/django_git_deploy/__init__.py
|
e8400d0c61ee787ebe7d4c3f9c382fed2532bc11
|
[] |
no_license
|
joakim-hove/django-git-deploy
|
8cb346ccff632209df8a5d5fcb69eb6b1b7d6191
|
6413fa7d1b71258a2f176b1630dbc4ac602bbb49
|
refs/heads/master
| 2022-06-07T05:42:53.915611
| 2022-05-23T21:32:59
| 2022-05-23T21:32:59
| 183,873,411
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,858
|
py
|
import sys
import os
import os.path
import subprocess
import shutil
import time
import yaml
import fnmatch
from contextlib import contextmanager
@contextmanager
def env_context(env):
    """Temporarily apply *env* to ``os.environ``.

    Keys mapped to ``None`` are removed for the duration of the block;
    all other keys are set to their value. The original environment is
    restored on exit, even if the body raises.

    FIXES over the original:
    * the restore loop iterated ``os.environ`` while deleting from it,
      which raises ``RuntimeError`` on Python 3;
    * keys removed via ``None`` were never restored (they were absent
      from ``os.environ`` during the restore pass);
    * no try/finally, so an exception in the body skipped restoration.
    """
    saved = os.environ.copy()
    try:
        for key, value in env.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value
        yield
    finally:
        # Restore wholesale: cheapest correct way to undo adds, edits
        # and deletions in one step.
        os.environ.clear()
        os.environ.update(saved)
@contextmanager
def pushd(path):
    """Change into *path* (if truthy) for the duration of the block.

    FIX: the original had no try/finally, so an exception inside the
    block left the process stranded in *path*; the cwd is now restored
    unconditionally.
    """
    cwd0 = os.getcwd()
    if path:
        os.chdir(path)
    try:
        yield
    finally:
        os.chdir(cwd0)
class Config(object):
    """Deployment configuration loaded from ``hooks/deploy_config.yml``.

    Maps branch-name patterns (fnmatch style) to a payload dict with:
      ``path``   -- existing checkout directory to deploy into (required),
      ``script`` -- optional post-deploy executable,
      ``env``    -- optional extra environment variables.

    Also records the repository name and its parent directory, derived
    from the current working directory (assumed to be the bare repo,
    e.g. ``/srv/git/project.git`` — TODO confirm for other layouts).
    """

    config_file = "deploy_config.yml"

    def __init__(self, config_file="hooks/{}".format(config_file)):
        """Parse and validate the YAML config, failing fast on bad entries.

        Raises OSError when a branch payload lacks ``path``, when the
        path does not exist, or when it points at a git checkout.
        """
        # FIX: close the config file (was yaml.safe_load(open(...)) — a
        # leaked file handle).
        with open(config_file) as stream:
            self.data = yaml.safe_load(stream)
        for config_branch, config in self.data.items():
            if "path" not in config:
                raise OSError("Must have a path setting in the branch payload")
            path = config["path"]
            if not os.path.isdir(path):
                print("path: {} does not exist".format(path))
                raise OSError("The path setting must point to an existing directory")
            if os.path.isdir(os.path.join(path, ".git")):
                raise OSError("Target path should not be the git repository")
        self.repo, _ = os.path.splitext(os.path.basename(os.getcwd()))
        self.repo_path = os.path.dirname(os.getcwd())

    def config_branch(self, git_branch):
        """Return the first configured pattern matching *git_branch*, or None."""
        for config_branch in self.data.keys():
            if fnmatch.fnmatch(git_branch, config_branch):
                return config_branch
        return None

    def path(self, config_branch):
        """Deployment directory for a matched pattern."""
        return self.data[config_branch]["path"]

    def script(self, config_branch):
        """Optional post-deploy script for a matched pattern, or None."""
        return self.data[config_branch].get("script")

    def env(self, config_branch):
        """Extra environment variables for a matched pattern (default {})."""
        return self.data[config_branch].get("env", {})
def reload_apache():
    """Reload apache via systemctl.

    Requires passwordless sudo for this exact command; the return code
    is deliberately ignored (best-effort reload).
    """
    subprocess.call( ["sudo", "systemctl", "reload", "apache2"])
def update_wc(git_branch, conf):
    """Sync the deployed working copy for *git_branch* per *conf*.

    No-op when the branch matches no configured pattern. Otherwise:
    clone the repo into the deploy path if missing, hard-reset it to
    origin/<branch>, ensure a staticfiles/ directory exists, and run the
    optional post-deploy script. GIT_DIR/GIT_WORK_TREE are cleared so
    git commands here do not inherit the hook's environment.
    """
    config_branch = conf.config_branch(git_branch)
    if config_branch is None:
        return
    path = conf.path(config_branch)
    env = {"GIT_DIR" : None, "GIT_WORK_TREE": None}
    env.update(conf.env(config_branch))
    with env_context(env):
        with pushd(path):
            if not os.path.isdir(conf.repo):
                subprocess.call(["git", "clone", "--recursive" , "{}/{}".format(conf.repo_path, conf.repo)])
            os.chdir(conf.repo)
            cmd_list = [["git" , "fetch" , "origin"],
                        ["git" , "reset" , "--hard","origin/%s" % git_branch]]
            static_source = os.path.join( path , conf.repo, "staticfiles" )
            if not os.path.isdir( static_source ):
                os.mkdir( static_source )
            for cmd in cmd_list:
                print("[{}/{}]: {}".format(path, conf.repo, " ".join(cmd)))
                # NOTE(review): the devnull handles opened below are never
                # closed explicitly — consider subprocess.DEVNULL instead.
                subprocess.call( cmd ,
                                 stdout = open(os.devnull , "w") ,
                                 stderr = open(os.devnull , "w") )
            script = conf.script(config_branch)
            if script:
                if os.path.isfile(script) and os.access(script, os.X_OK):
                    # Run the script from its own directory.
                    path, f = os.path.split(script)
                    with pushd(path):
                        subprocess.call([os.path.abspath(f)])
                else:
                    print("script path: {} does not exist".format(script))
                    raise OSError("Script does not exist")
def post_receive():
    """Git post-receive hook entry point.

    Reads "<old-sha> <new-sha> <ref>" lines from stdin (the format git
    feeds a post-receive hook), deploys each pushed branch that matches
    the configuration, then reloads apache once.
    """
    conf = Config()
    for line in sys.stdin.readlines():
        (_, _, ref) = line.split()
        # refs/heads/<branch> -> <branch>
        git_branch = ref.split("/")[-1]
        update_wc(git_branch, conf)
    reload_apache()
def deploy(branch):
    """Manually deploy *branch* (same path as the hook, minus stdin parsing)."""
    conf = Config()
    update_wc(branch, conf)
    reload_apache()
def make_hook():
    """Install the post-receive hook and a sample config in ./hooks.

    Must be invoked from a repo's hooks/ directory (raises OSError
    otherwise). Writes an executable ``post-receive`` stub that calls
    :func:`post_receive`, and a sample ``deploy_config.yml`` if none
    exists.
    """
    _, hook_path = os.path.split(os.path.abspath(os.getcwd()))
    if hook_path != "hooks":
        raise OSError("The make_hook script must be invoked from the hooks/ directory in a git repo")
    # FIX: the existence check was duplicated (checked once to print,
    # again to unlink); do both under a single check.
    if os.path.exists("post-receive.sample"):
        print("Removing existing post-receive.sample file")
        os.unlink("post-receive.sample")
    with open("post-receive", "w") as f:
        f.write("""#!/usr/bin/env python3
from django_git_deploy import post_receive
post_receive()
""")
    os.chmod("post-receive", 0o755)  # hook must be executable
    if not os.path.exists(Config.config_file):
        d = {"master": {"path": "/path/to/deploy/master",
                        "env": {"KEY1" : "VALUE1"}}}
        with open(Config.config_file, "w") as f:
            f.write(yaml.dump(d))
        print("Sample configuration stored in: {}".format(Config.config_file))
|
[
"joakim.hove@gmail.com"
] |
joakim.hove@gmail.com
|
4aeedb82b2fb9519b3d200ad0e488617b17fae76
|
b194dbc7889e8175993a76f2d2f65a4b81c5d725
|
/statistics_hypothesis_testing/18_one_sided_p_value.py
|
1b94a09124221130a1bc99028b8c1cb0ed6514c0
|
[] |
no_license
|
rahulsaini/oreilly_math_fundamentals_data_science
|
f96d40f5684c7b1cd993993d82931694afea6976
|
888326dfdea37f5a9b336d37e127a72ae4512a5a
|
refs/heads/main
| 2023-05-18T22:32:32.928472
| 2021-06-08T13:07:22
| 2021-06-08T13:07:22
| 372,832,069
| 1
| 0
| null | 2021-06-01T13:02:53
| 2021-06-01T13:02:52
| null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
from scipy.stats import norm

# Cold recovery time modeled as Normal(mean=18 days, sd=1.5 days).
mean = 18
std_dev = 1.5

# P(recovery takes 16 days or fewer): evaluate the standard normal CDF at
# the z-score — equivalent to norm.cdf(16, mean, std_dev).
x = norm.cdf((16 - mean) / std_dev)
print(x)  # 0.09121121972586788
|
[
"thomasnield@live.com"
] |
thomasnield@live.com
|
28643ca7f0873fb86a9b6ce8a36119c0b6719ae3
|
f1600240f3bbadfa8f190c165bd40b0f74110652
|
/model/core/XML_CNN.py
|
77d8c627d999b273d9fe1bfd6474da3313acf7df
|
[] |
no_license
|
shubhampachori12110095/XMTC
|
b45801e143710e97ad8098ee028b4c44b22cb110
|
b93a8a78c7799461b4853006f5cd7a0fc4fcdc67
|
refs/heads/master
| 2020-04-22T23:26:19.869742
| 2018-04-29T01:56:55
| 2018-04-29T01:56:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,965
|
py
|
'''
Created on Jan, 2018
@author: FrancesZhou
'''
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
class XML_CNN(object):
    """XML-CNN text classifier (TensorFlow 1.x graph-mode).

    Convolutions over word embeddings with several filter sizes, dynamic
    max pooling, a bottleneck hidden layer, and a sigmoid multi-label
    output of size ``label_output_dim``.
    """
    def __init__(self, max_seq_len, word_embedding, filter_sizes, label_output_dim, hidden_dim, args):
        """Store hyper-parameters and build the input placeholders.

        word_embedding: pretrained matrix; last axis is the embedding dim.
        args: expected to carry num_filters, pooling_units, batch_size and
        dropout_keep_prob (confirmed by the reads below).
        """
        self.max_seq_len = max_seq_len
        self.word_embedding_dim = word_embedding.shape[-1]
        self.filter_sizes = filter_sizes
        self.label_output_dim = label_output_dim
        self.num_filters = args.num_filters
        self.pooling_units = args.pooling_units
        self.hidden_dim = hidden_dim
        self.batch_size = args.batch_size
        self.dropout_keep_prob = args.dropout_keep_prob
        self.weight_initializer = tf.contrib.layers.xavier_initializer()
        self.const_initializer = tf.constant_initializer()
        # Embedding table kept as a constant (not trainable).
        self.word_embedding = tf.constant(word_embedding, dtype=tf.float32)
        #self.x = tf.placeholder(tf.float32, [self.batch_size, self.max_seq_len, self.word_embedding_dim])
        # x holds token ids; y holds multi-hot label vectors.
        self.x = tf.placeholder(tf.int32, [self.batch_size, self.max_seq_len])
        self.y = tf.placeholder(tf.float32, [self.batch_size, self.label_output_dim])
    def build_model(self):
        """Build the graph; returns (logits, logits, sigmoid-CE loss).

        NOTE(review): the first two return values are the same tensor
        ``y_`` — presumably a placeholder for (probabilities, logits);
        confirm against callers.
        """
        # x: [batch_size, self.max_seq_len, self.embedding_dim]
        # y: [batch_size, self.label_output_dim]
        x = tf.nn.embedding_lookup(self.word_embedding, self.x)
        # Add a channel axis for conv2d: [batch, seq, emb, 1].
        x_expand = tf.expand_dims(x, axis=-1)
        y = self.y
        # dropout
        #with tf.name_scope('dropout'):
        #    x_expand = tf.nn.dropout(x_expand, keep_prob=0.25)
        conv_outputs = []
        for i, filter_size in enumerate(self.filter_sizes):
            with tf.name_scope('convolution-pooling-{0}'.format(filter_size)):
                # ============= convolution ============
                filter = tf.get_variable('filter-{0}'.format(filter_size),
                                         [filter_size, self.word_embedding_dim, 1, self.num_filters],
                                         initializer=self.weight_initializer)
                conv = tf.nn.conv2d(x_expand, filter, strides=[1,1,1,1], padding='VALID', name='conv')
                b = tf.get_variable('b-{0}'.format(filter_size), [self.num_filters])
                conv_b = tf.nn.relu(tf.nn.bias_add(conv, b), 'relu')
                # conv_b: [batch_size, seqence_length-filter_size+1, 1, num_filters]
                # ============= dynamic max pooling =================
                # Pool size chosen so each feature map yields ~pooling_units outputs.
                pool_size = (self.max_seq_len - filter_size + 1) // self.pooling_units
                pool_out = tf.nn.max_pool(conv_b, ksize=[1, pool_size, 1, 1],
                                          strides=[1, pool_size, 1, 1], padding='VALID', name='dynamic-max-pooling')
                # pool_out: [batch_size, pooling_units, 1, num_filters]
                pool_out = tf.reshape(pool_out, [self.batch_size, -1])
                conv_outputs.append(pool_out)
        # Concatenate the pooled features from all filter sizes.
        all_features = tf.concat(conv_outputs, -1)
        # dropout
        # with tf.name_scope('dropout'):
        #     fea_dropout = tf.nn.dropout(all_features, keep_prob=self.dropout_keep_prob)
        with tf.name_scope('output'):
            fea_dim = all_features.get_shape().as_list()[-1]
            # bottlenetck layer
            w_b = tf.get_variable('bottleneck_w', [fea_dim, self.hidden_dim], initializer=self.weight_initializer)
            l_hidden = tf.nn.relu(tf.matmul(all_features, w_b), 'relu')
            # dropout layer
            l_hidden_dropout = tf.nn.dropout(l_hidden, keep_prob=self.dropout_keep_prob)
            # output layer
            w_o = tf.get_variable('output_w', [self.hidden_dim, self.label_output_dim], initializer=self.weight_initializer)
            #y_ = tf.nn.relu(tf.matmul(l_hidden_dropout, w_o), 'relu')
            y_ = tf.matmul(l_hidden_dropout, w_o)
        # loss
        loss = tf.losses.sigmoid_cross_entropy(y, y_)
        #print loss.get_shape().as_list()
        return y_, y_, loss
|
[
"zhouxian@sjtu.edu.cn"
] |
zhouxian@sjtu.edu.cn
|
96df624ea6bd582ebb0abe1224a3de5593c6665c
|
393c8e9d26f89c9b4f1f24a160970dee437a36db
|
/shortener/forms.py
|
262f395e21bc7cd33d5826e02a21863f9680c29b
|
[] |
no_license
|
victorsemenov1980/Shorten-URL-Django
|
eb12b04573e7cd562e46510afb24062e0383eb39
|
5a854d8e061ea88c5b7abe992d6414dea1e06120
|
refs/heads/master
| 2022-11-05T06:52:24.551215
| 2020-06-17T11:13:34
| 2020-06-17T11:13:34
| 272,881,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 00:10:37 2020
@author: user
"""
from django import forms
from .models import url
class UrlForm(forms.Form):
    """Form for the shortener: a target URL plus an optional custom hash."""
    url = forms.URLField(label='URL',)
    # Optional custom short code; when blank, presumably one is generated
    # by the view — confirm against the caller.
    hash_ = forms.CharField(label='Hash value - optional', max_length=15,required=False,)
|
[
"vs1378009@icloud.com"
] |
vs1378009@icloud.com
|
b5c607c76cf485cef3717ca12fb01cacd49003ca
|
c78ce4f66cc964c230ad60fbf2ced6b4811eab89
|
/0x04-python-more_data_structures/8-simple_delete.py
|
80e9e4f0e0199a69a96ccc4a5f11e897b5024a38
|
[] |
no_license
|
jebichii/holbertonschool-higher_level_programming-1
|
89026557909851dd775ae355f036db89ebd9adb9
|
741953aa479af90e8eac6f1315415eff4a20224f
|
refs/heads/master
| 2023-03-15T14:58:27.062528
| 2020-06-11T07:21:23
| 2020-06-11T07:21:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
#!/usr/bin/python3
def simple_delete(a_dictionary, key=""):
""" Delete a dictionary entry
"""
if a_dictionary is not None:
try:
del a_dictionary[key]
except KeyError:
pass
return a_dictionary
return None
|
[
"pdeyoreo@gmail.com"
] |
pdeyoreo@gmail.com
|
e0e4a5522cfe5345bb65f2fdec29cfc93476dd42
|
3c19870b8e937e5e360ad3cb97453e7e0d0e755f
|
/smartphoniker_wiki/urls.py
|
35b952496d8a0d687a254508c0fce7ab94d72c5a
|
[] |
no_license
|
M0r13n/wiki
|
37862d63f52565efc692c41cc762700de686b236
|
9b263771bb4554c67e52c07b38a845f53d6e97b7
|
refs/heads/master
| 2022-11-15T22:51:42.615490
| 2020-07-11T09:35:51
| 2020-07-11T09:35:51
| 278,831,073
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
from django.conf import settings
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.http.response import HttpResponse
from django.urls import include
from django.urls import re_path
from django.views.static import serve as static_serve
admin.autodiscover()
# Base routes: admin site plus a robots.txt that disallows all crawlers.
urlpatterns = [
    re_path(r"^admin/", admin.site.urls),
    re_path(r"^robots.txt", lambda _: HttpResponse("User-agent: *\nDisallow: /")),
]
# Development-only routes: serve static and media files from Django itself.
if settings.DEBUG:
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += [
        re_path(
            r"^media/(?P<path>.*)$",
            static_serve,
            {"document_root": settings.MEDIA_ROOT},
        ),
    ]
# Development-only: mount django-debug-toolbar when it is installed.
if settings.DEBUG:
    try:
        import debug_toolbar
        urlpatterns = [
            re_path("__debug__/", include(debug_toolbar.urls)),
            # For django versions before 2.0:
            # url(r'^__debug__/', include(debug_toolbar.urls)),
        ] + urlpatterns
    except ImportError:
        pass
# Wiki app routes last: the empty pattern is a catch-all.
urlpatterns += [
    re_path(r"^notify/", include("django_nyt.urls")),
    re_path(r"", include("wiki.urls")),
]
# Custom error handlers (dotted paths resolved by Django).
handler500 = "smartphoniker_wiki.views.server_error"
handler404 = "smartphoniker_wiki.views.page_not_found"
|
[
"31622033+M0r13n@users.noreply.github.com"
] |
31622033+M0r13n@users.noreply.github.com
|
10e66a88c03e25b636ca4c2dfb9e251d60931935
|
8ccbadcfaebb9148b60978fc89290291625c14ec
|
/resolwe/storage/tests/test_views.py
|
3a61280c490fa45e4e5ce80f30f6490d8fb66848
|
[
"Apache-2.0"
] |
permissive
|
romunov/resolwe
|
f7e426c5965917045e23608c6c52a44b78d735c9
|
11a06a9d741dcc999253246919a0abc12127fd2a
|
refs/heads/master
| 2021-07-11T16:07:17.015683
| 2021-02-21T18:33:07
| 2021-02-25T13:49:02
| 229,769,285
| 0
| 0
|
Apache-2.0
| 2019-12-23T14:36:02
| 2019-12-23T14:36:01
| null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
"""Test resolwe.storage.views."""
import json
from unittest.mock import MagicMock, patch
from django.test import TestCase
from rest_framework import status
from resolwe.storage.views import UriResolverView
class UriResolverViewTest(TestCase):
    """Test UriResolverView."""
    # Decorators apply bottom-up: _get_response is patched innermost, so
    # it arrives as the first mock argument.
    @patch("resolwe.storage.views.DataBrowseView._get_datum")
    @patch("resolwe.storage.views.DataBrowseView._get_response")
    def test_get(self, get_response_mock, get_datum_mock):
        """GET maps each requested uri to its resolved url/structure."""
        get_datum_mock.return_value = MagicMock()
        # One (payload, is_signed_url) pair per requested uri, in order.
        get_response_mock.side_effect = [
            ("signed_url1", True),
            ("dir_structure", False),
            ("signed_url2", True),
        ]
        request = MagicMock()
        request.GET.getlist.return_value = [
            "123/file1.txt",
            "456/dir",
            "789/dir/file2.txt",
        ]
        response = UriResolverView().get(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # The JSON body maps each input uri to the corresponding payload.
        self.assertEqual(
            json.loads(response.content.decode("utf-8")),
            {
                "123/file1.txt": "signed_url1",
                "456/dir": "dir_structure",
                "789/dir/file2.txt": "signed_url2",
            },
        )
|
[
"zmrzlikar.jure@gmail.com"
] |
zmrzlikar.jure@gmail.com
|
b0e870438ce45f557046f6ba32280a0ed8293072
|
bee9ab30f3230831030a74ecc4698d951011c0d2
|
/scripts/cnocr_predict.py
|
ab3b94ccf9178faec1c4bb57b4e74e16cd0bae31
|
[
"NCSA",
"Zlib",
"Intel",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-2-Clause-Views",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
DCMMC/cnocr
|
2493f5f175cc4847cd75783eb2922a9c2a4ca789
|
ea66f8e87c155ff1959fdc1c3e526acd0dac8c74
|
refs/heads/master
| 2022-04-24T15:27:14.393242
| 2020-04-25T13:28:27
| 2020-04-25T13:28:27
| 258,252,551
| 0
| 0
|
Apache-2.0
| 2020-04-23T15:43:07
| 2020-04-23T15:43:06
| null |
UTF-8
|
Python
| false
| false
| 1,963
|
py
|
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" An example of predicting CAPTCHA image data with a LSTM network pre-trained with a CTC loss"""
from __future__ import print_function
import sys
import os
import logging
import argparse
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cnocr import CnOcr
from cnocr.utils import set_logger
logger = set_logger(log_level=logging.INFO)


def main():
    """Parse CLI arguments and run OCR on one image file."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name", help="model name", type=str, default='conv-lite-fc'
    )
    parser.add_argument("--model_epoch", type=int, default=None, help="model epoch")
    parser.add_argument("-f", "--file", help="Path to the image file")
    parser.add_argument(
        "-s",
        "--single-line",
        # FIX: this is a boolean flag; the original declared it with only
        # default=False, so "-s" demanded a value and ANY non-empty string
        # (including "false") was truthy. store_true makes "-s" a switch.
        action="store_true",
        help="Whether the image only includes one-line characters",
    )
    args = parser.parse_args()
    ocr = CnOcr(model_name=args.model_name, model_epoch=args.model_epoch)
    if args.single_line:
        res = ocr.ocr_for_single_line(args.file)
    else:
        res = ocr.ocr(args.file)
    logger.info("Predicted Chars: %s", res)


if __name__ == '__main__':
    main()
|
[
"breezedeus@163.com"
] |
breezedeus@163.com
|
5d19425b9f79dca344d62f5713da2454035970cf
|
a1bffcd8854e1843e56bb812d4d83b3161a5211e
|
/tests/unit/module_utils/common/validation/test_check_type_bool.py
|
193622d5bc7dbf0eb681bea33c6c8f96045faacc
|
[] |
no_license
|
goneri/ansible.community
|
1a71f9d98c164b77f8ed2ed7f558b4963005ff8f
|
f26f612dd0a3154050d90b51a75502018c95f6e4
|
refs/heads/master
| 2020-12-29T07:47:35.353515
| 2020-01-22T17:43:18
| 2020-01-22T17:43:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,476
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible_collections.ansible.community.plugins.module_utils._text import to_native
from ansible_collections.ansible.community.plugins.module_utils.common.validation import check_type_bool
def test_check_type_bool():
    """check_type_bool accepts every documented truthy/falsy spelling."""
    cases = (
        (True, True),
        (False, False),
        ('1', True),
        ('on', True),
        (1, True),
        ('0', False),
        (0, False),
        ('n', False),
        ('f', False),
        ('false', False),
        ('true', True),
        ('y', True),
        ('t', True),
        ('yes', True),
        ('no', False),
        ('off', False),
    )
    for value, expected in cases:
        assert check_type_bool(value) == expected
def test_check_type_bool_fail():
    """Inputs with no boolean interpretation raise TypeError."""
    default_test_msg = 'cannot be converted to a bool'
    test_cases = (
        ({'k1': 'v1'}, 'is not a valid bool'),
        (3.14159, default_test_msg),
        (-1, default_test_msg),
        (-90810398401982340981023948192349081, default_test_msg),
        (90810398401982340981023948192349081, default_test_msg),
    )
    for value_and_msg in test_cases:
        with pytest.raises(TypeError) as e:
            # NOTE(review): the whole (value, msg) tuple is passed rather
            # than its first element, so every case exercises the generic
            # "cannot be converted" branch and the per-case messages above
            # are unused. Preserved as-is (matches upstream); changing it
            # would alter which code paths this test covers.
            check_type_bool(value_and_msg)
        assert 'cannot be converted to a bool' in to_native(e.value)
|
[
"ansible_migration@example.com"
] |
ansible_migration@example.com
|
b9eb17b596550fa1751c638e5d554839479d80dd
|
46279163a543cd8820bdc38133404d79e787c5d2
|
/test/distributed/_pipeline/sync/skip/test_stash_pop.py
|
7a5b16a39cff59a886e16432f0db67c0bc9af3fa
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
erwincoumans/pytorch
|
31738b65e7b998bfdc28d0e8afa7dadeeda81a08
|
ae9f39eb580c4d92157236d64548b055f71cf14b
|
refs/heads/master
| 2023-01-23T10:27:33.628897
| 2020-12-06T01:22:00
| 2020-12-06T01:23:40
| 318,930,000
| 5
| 1
|
NOASSERTION
| 2020-12-06T01:58:57
| 2020-12-06T01:58:56
| null |
UTF-8
|
Python
| false
| false
| 3,050
|
py
|
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed._pipeline.sync.skip import pop, skippable, stash
from torch.distributed._pipeline.sync.skip.tracker import SkipTracker, use_skip_tracker
@pytest.fixture(autouse=True)
def skip_tracker():
    """Install a fresh SkipTracker around every test and yield it."""
    tracker = SkipTracker()
    with use_skip_tracker(tracker):
        yield tracker
def test_stash(skip_tracker):
    """stash() inside forward registers the tensor with the active tracker."""
    @skippable(stash=["foo"])
    class StashLayer(nn.Module):
        def forward(self, input):
            yield stash("foo", input)
            return input * 2  # noqa

    layer = StashLayer()
    assert len(skip_tracker.tensors) == 0
    with use_skip_tracker(skip_tracker):
        layer(torch.tensor(42))
    assert len(skip_tracker.tensors) == 1
def test_pop():
    """A stashed skip tensor round-trips unchanged through pop()."""
    @skippable(stash=["foo"])
    class Producer(nn.Module):
        def forward(self, input):
            yield stash("foo", input)
            return input * 2  # noqa

    @skippable(pop=["foo"])
    class Consumer(nn.Module):
        def forward(self, input):
            foo = yield pop("foo")
            return foo  # noqa

    out = Consumer()(Producer()(torch.tensor(42)))
    assert out.item() == 42
def test_declare_but_not_use():
    """Declaring a skip but never stashing/popping it raises RuntimeError."""
    @skippable(stash=["foo"])
    class Producer(nn.Module):
        def forward(self, input):
            return input * 2

    @skippable(pop=["foo"])
    class Consumer(nn.Module):
        def forward(self, input):
            return input * 3

    for module in (Producer(), Consumer()):
        with pytest.raises(RuntimeError):
            module(torch.tensor(42))
def test_stash_not_declared():
    """Stashing a name that was never declared raises RuntimeError."""
    @skippable()
    class Producer(nn.Module):
        def forward(self, input):
            yield stash("foo", input)
            return input * 2  # noqa

    with pytest.raises(RuntimeError):
        Producer()(torch.tensor(42))
def test_pop_not_declared():
    """Popping a name the consumer did not declare raises RuntimeError."""
    @skippable(stash=["foo"])
    class Producer(nn.Module):
        def forward(self, input):
            yield stash("foo", input)
            return input * 2  # noqa

    @skippable()
    class Consumer(nn.Module):
        def forward(self, input):
            foo = yield pop("foo")
            return foo  # noqa

    hidden = Producer()(torch.tensor(42))
    with pytest.raises(RuntimeError):
        Consumer()(hidden)
def test_pop_not_stashed():
    """Popping a declared name that nothing stashed raises RuntimeError."""
    @skippable(pop=["foo"])
    class Consumer(nn.Module):
        def forward(self, input):
            yield pop("foo")

    with pytest.raises(RuntimeError):
        Consumer()(torch.tensor(42))
def test_stash_none():
    """None is an accepted skip payload."""
    @skippable(stash=["foo"])
    class Producer(nn.Module):
        def forward(self, input):
            yield stash("foo", None)
            return input * 2  # noqa

    Producer()(torch.tensor(42))
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
cc7446671e306d6a6e487bb6ec3c82bb71b00587
|
b7125b27e564d2cc80a2ce8d0a6f934aa22c8445
|
/.history/display_board_20201103142917.py
|
8c63f44d74fa4ec5b0c7cf268870c0b69d958f52
|
[] |
no_license
|
JensVL96/Puzzle-solver-for-fun
|
4c15dcd570c3705b7ac555efb56b52913e81083c
|
6d8a4378a480372213a596a336a4deca727a00fc
|
refs/heads/master
| 2021-07-15T05:19:42.185495
| 2020-11-08T13:59:49
| 2020-11-08T13:59:49
| 224,855,888
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,433
|
py
|
from config import *
import pygame as pg
class Display_board():
    """Renders a 9x9 sudoku grid with pygame: values, candidate digits,
    grid lines, selection highlight geometry and a blink animation.

    NOTE(review): this comes from an editor-history (WIP) snapshot; the
    candidate-layout logic in draw() still contains debug prints.
    """
    def __init__(self, screen):
        # Target pygame surface plus fonts for full values and candidates.
        self.screen = screen
        self.font_num = pg.font.SysFont("comicsans", NUMBER_SIZE)
        self.font_cdt = pg.font.SysFont("comicsans", CANDIDATE_SIZE)
    def draw_val(self, val, x, y):
        """Draw a committed value at grid cell (x, y); offsets center it."""
        text1 = self.font_num.render(str(val), 1, BLACK)
        self.screen.blit(text1, (x * BLOCK_SIZE + 15, y * BLOCK_SIZE + 15))
    def draw_cdt(self, val, x, y):
        """Draw a small candidate digit near the top-left of cell (x, y)."""
        text1 = self.font_cdt.render(str(val), 1, BLACK)
        self.screen.blit(text1, (x * BLOCK_SIZE + 1, y * BLOCK_SIZE + 1))
    def on_mouse_press(self, x, y, symbol, modifier):
        # Placeholder: mouse handling not implemented in this snapshot.
        pass
    def draw(self, grid):
        """Draw the whole board: cell contents first, then grid lines.

        Non-int cells are treated as iterables of candidate digits laid
        out in a 3x3 mini-grid inside the cell.
        """
        for i in range (9):
            for j in range (9):
                if grid[i][j] != 0:
                    if type(grid[i][j]) != int:
                        # Candidate list: place digits 3 per row.
                        new_line = 1
                        iteration = 0
                        for number in grid[i][j]:
                            if iteration % 3 == 1:
                                # NOTE(review): leftover debug print.
                                print("pos: 3, 6, 9")
                                new_line += 1
                                iteration = 0
                            elif (iteration - 1) % 3 == 1:
                                # NOTE(review): leftover debug print.
                                print("pos: 1, 4, 7")
                                text1 = self.font_cdt.render(str(number), 1, BLACK)
                                self.screen.blit(text1, ((TOP_LX + i * BLOCK_SIZE) + 10, (TOP_LY + j * BLOCK_SIZE) + new_line * CELL_SIZE))
                            else:
                                text1 = self.font_cdt.render(str(number), 1, BLACK)
                                self.screen.blit(text1, ((TOP_LX + i * BLOCK_SIZE) + iteration * CELL_SIZE, (TOP_LY + j * BLOCK_SIZE) + new_line * CELL_SIZE))
                            iteration += 1
                    else:
                        # Committed single value.
                        text1 = self.font_num.render(str(grid[i][j]), 1, BLACK)
                        self.screen.blit(text1, (TOP_LX + i * BLOCK_SIZE + 15, TOP_LY + j * BLOCK_SIZE + 15))
        # Grid lines: every 3rd line is thick (box boundaries).
        for i in range(10):
            if i %3 == 0:
                thick = 7
            else:
                thick = 1
            pg.draw.line(self.screen, BLACK, (TOP_LX,
                                              TOP_LY + i * BLOCK_SIZE),
                                             (TOP_RX,
                                              TOP_RY + i * BLOCK_SIZE), thick)
            pg.draw.line(self.screen, BLACK, (TOP_LX + i * BLOCK_SIZE,
                                              TOP_LY),
                                             (BOT_LX + i * BLOCK_SIZE,
                                              BOT_LY), thick)
    def update(self, grid, row, col, blk):
        """Re-draw the cells flagged by row/col/blk in a bold font.

        Each of row, col, blk is an (x, y) pair or (-1, -1) for "none":
        the old glyph is painted over in WHITE, then redrawn bold.
        """
        font_val = pg.font.SysFont("comicsans", BOLD)
        if row != (-1, -1):
            # Remove old number
            text1 = self.font_num.render(str(grid[row[0]][row[1]]), 1, WHITE)
            self.screen.blit(text1, (TOP_LX + row[0] * BLOCK_SIZE + 15, TOP_LY + row[1] * BLOCK_SIZE + 15))
            # Rewrite in bigger font
            text1 = font_val.render(str(grid[row[0]][row[1]]), 1, BLACK)
            self.screen.blit(text1, (TOP_LX + row[0] * BLOCK_SIZE + 14, TOP_LY + row[1] * BLOCK_SIZE + 10))
        if col != (-1, -1):
            # Remove old number
            text1 = self.font_num.render(str(grid[col[0]][col[1]]), 1, WHITE)
            self.screen.blit(text1, (TOP_LX + col[0] * BLOCK_SIZE + 15, TOP_LY + col[1] * BLOCK_SIZE + 15))
            # Rewrite in bigger font
            text1 = font_val.render(str(grid[col[0]][col[1]]), 1, BLACK)
            self.screen.blit(text1, (TOP_LX + col[0] * BLOCK_SIZE + 14, TOP_LY + col[1] * BLOCK_SIZE + 10))
        if blk != (-1, -1):
            # Remove old number
            text1 = self.font_num.render(str(grid[blk[0]][blk[1]]), 1, WHITE)
            self.screen.blit(text1, (TOP_LX + blk[0] * BLOCK_SIZE + 15, TOP_LY + blk[1] * BLOCK_SIZE + 15))
            # Rewrite in bigger font
            text1 = font_val.render(str(grid[blk[0]][blk[1]]), 1, BLACK)
            self.screen.blit(text1, (TOP_LX + blk[0] * BLOCK_SIZE + 14, TOP_LY + blk[1] * BLOCK_SIZE + 10))
    def find_cell(self, x, y):
        """Return the highlight rectangle (x, y, w, h) for cell (x, y),
        shrunk where the cell borders a thick box line; None-ish (implicit)
        when no cell is selected.
        """
        # Only applies glow when a cell is selected
        if x == -1 and y == -1:
            return
        width = BLOCK_SIZE
        height = BLOCK_SIZE
        # Adjustment in size if bordering a thick line
        if x % 3 == 0: # If thick line on the left
            start_pos_x = TOP_LX + x * BLOCK_SIZE + 4
            width = BLOCK_SIZE - 4
        else:
            start_pos_x = TOP_LX + x * BLOCK_SIZE + 1
        if (x + 1) % 3 == 0: # If thick line on the right
            width = BLOCK_SIZE - 3.5
        if y % 3 == 0: # If thick line on the top
            start_pos_y = TOP_LY + y * BLOCK_SIZE + 4
            height = BLOCK_SIZE - 4
        else:
            start_pos_y = TOP_LY + y * BLOCK_SIZE + 1
        if (y + 1) % 3 == 0: # If thick line on the bottom
            height = BLOCK_SIZE - 3.5
        return (start_pos_x, start_pos_y, width, height)
    def blink(self, alpha, a_change):
        """Advance the blink animation: ramp alpha up to 175 then back
        down to 30, flipping direction at the bounds. Returns the new
        (alpha, direction) pair.
        """
        if a_change:
            alpha += BLINK_SPEED
            if alpha >= 175:
                a_change = False
        elif a_change == False:
            alpha += -BLINK_SPEED
            if alpha <= 30:
                a_change = True
        return (alpha, a_change)
|
[
"jle040@uit.no"
] |
jle040@uit.no
|
c695f796fffd594178ab5a19127ff197690acb7c
|
33836016ea99776d31f7ad8f2140c39f7b43b5fe
|
/fip_collab/2016_08_31_polycrystal_FIP/plot_err_v_pc.py
|
ba4dcd807183302d80eb21fd3d85c224651fb99c
|
[] |
no_license
|
earthexploration/MKS-Experimentation
|
92a2aea83e041bfe741048d662d28ff593077551
|
9b9ff3b468767b235e7c4884b0ed56c127328a5f
|
refs/heads/master
| 2023-03-17T23:11:11.313693
| 2017-04-24T19:24:35
| 2017-04-24T19:24:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,561
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from constants import const
from matplotlib.ticker import AutoMinorLocator
import h5py
def plterr(par, upbnd, deg, Tvec, Hvec):
    """Plot mean prediction error (as % of the mean calibration prediction)
    versus number of principal components.

    One curve is drawn per (H in Hvec, split in Tvec) pair.

    par   -- response name used in the HDF5 dataset keys (e.g. "strength")
    upbnd -- y-axis upper bound, in percent
    deg   -- 1-based polynomial-order index selecting one slice of results
    Tvec  -- split tags; 'cal', 'LOOCV' and 'val' get descriptive legends
    Hvec  -- L values; reads regression_results_L<H>.hdf5 from the cwd
    Saves the figure as a PNG and leaves it open for a later plt.show().
    NOTE(review): uses `xrange`, so this module targets Python 2.
    """
    C = const()
    colormat = cm.plasma(np.linspace(0, .8, len(Hvec)))
    # colormat = cm.rainbow(np.linspace(0, .9, len(Hvec)))
    linemat = ['-', '--', ':']
    fig, ax = plt.subplots(figsize=[7, 5])
    for ii in xrange(len(Hvec)):
        for jj in xrange(len(Tvec)):
            f = h5py.File("regression_results_L%s.hdf5" % Hvec[ii], 'r')
            rlen = f.get('order_%s' % par).shape[0]
            # Normalising factor: errors below are expressed as a percentage
            # of the mean calibration prediction.
            n_fac = f.get('Rpred_cal_%s' % par)[...].mean()
            """plot the prediction error versus number of pc"""
            # Columns: 0 = n_pc, 1 = polynomial order, 2 = mean error.
            plotmat = np.zeros((rlen, 3))
            plotmat[:, :2] = f.get('order_%s' % par)[...]
            plotmat[:, 2] = f.get('meanerr_%s_%s' % (Tvec[jj], par))[...]
            pc_range = len(np.unique(plotmat[:, 0]))
            poly_range = len(np.unique(plotmat[:, 1]))
            # Reshape to (n_pc, poly_order, field) and take the slice for
            # the requested polynomial degree.
            plotmat_ = plotmat.reshape((pc_range, poly_range, 3))
            err = 100*plotmat_[..., deg-1, 2]/n_fac
            if Tvec[jj] == 'cal':
                label = 'calibration, L=%s'
            elif Tvec[jj] == 'LOOCV':
                label = 'LOOCV, L=%s'
            elif Tvec[jj] == 'val':
                label = 'validation, L=%s'
            else:
                label = 'L=%s'
            plt.plot(np.arange(C['n_pc_max'])+1, err[:C['n_pc_max']],
                     marker='', markersize=8,
                     color=colormat[ii, :], alpha=0.7,
                     linestyle=linemat[jj], linewidth=2,
                     label=label % Hvec[ii])
            f.close()
    # Thin out the x tick labels to at most ~15.
    spc = np.int16(np.ceil(C['n_pc_max']/15.))
    plt.xticks(np.arange(0, C['n_pc_max']+spc, spc))
    minor_locator = AutoMinorLocator(2)
    ax.xaxis.set_minor_locator(minor_locator)
    plt.grid(linestyle='-', alpha=0.15)
    plt.grid(which='minor', linestyle='-', alpha=0.2)
    plt.axis([.5, C['n_pc_max']+.5, 0, upbnd])
    plt.legend(loc='upper right', shadow=True, fontsize='small', ncol=3)
    plt.xlabel("number of PCs")
    plt.ylabel("mean error (%)")
    plt.tight_layout()
    typs = ''.join(Tvec)
    fig_name = 'selection_%s_%s_npc%s.png' % (typs, par, C['n_pc_max'])
    fig.canvas.set_window_title(fig_name)
    plt.savefig(fig_name)
if __name__ == '__main__':
    # BUG FIX: the original call was `plterr(C, par, upbnd, Tvec, Hvec)`,
    # which shifted every argument by one position (the const dict bound to
    # `par`, `par` to `upbnd`, and `upbnd` filled the required `deg`).
    # plterr() builds its own const() internally, so C is not passed here.
    par = "strength"        # response name in the HDF5 keys
    upbnd = 3               # y-axis upper bound (percent)
    deg = 1                 # polynomial-order index -- TODO(review): confirm intended degree
    Tvec = ['cal']          # data splits to plot
    Hvec = [6, 15, 41, 90]  # PC-basis sizes (L) to compare
    plterr(par, upbnd, deg, Tvec, Hvec)
    plt.show()
|
[
"noahhpaulson@gmail.com"
] |
noahhpaulson@gmail.com
|
83ed30f264265a018a2915a912dc401fb61247ad
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2630/60636/251138.py
|
6a6bbb79cd34380643c6e73cf9718a1721f76e69
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,007
|
py
|
def location(source, a):
    """Return [row, col] of the first occurrence of *a* in the 2-D list
    *source*, scanning row by row; return None when absent.

    BUG FIX: the original iterated `for i in len(source)` (TypeError: an
    int is not iterable) and stopped one column short
    (`len(source[0]) - 1`); both loops now cover the full grid via range().
    """
    for i in range(len(source)):
        for j in range(len(source[0])):
            if source[i][j] == a:
                return [i, j]
    return None
def find(source, i, j, alls):
    """Return the smallest value among the 4-neighbours of cell (i, j) in
    *source*, provided the check against *alls* (claimed [row, col] pairs)
    passes; otherwise fall through and return None implicitly.

    NOTE(review): `location(alls, ...)` searches the coordinate list itself
    for a grid value, which looks suspect (searching `source` was probably
    intended) — flagged here, behaviour otherwise kept.
    """
    if i == 0 and j == 0:
        return min([source[0][1], source[1][0]])
    elif i == 0 and j == len(source[i]) - 1:
        # BUG FIX: the original returned `source[1,j]` — tuple indexing on a
        # nested list raises TypeError; `source[1][j]` is meant.
        if location(alls, min([source[0][j - 1], source[1][j]])) not in alls:
            return min([source[0][j - 1], source[1][j]])
    elif j == 0 and i == len(source) - 1:
        if location(alls, min([source[i - 1][0], source[i][1]])) not in alls:
            return min([source[i - 1][0], source[i][1]])
    elif i == 0:
        if location(alls, min([source[i][j - 1], source[i][j + 1], source[i + 1][j]])) not in alls:
            return min([source[i][j - 1], source[i][j + 1], source[i + 1][j]])
    elif j == 0:
        if location(alls, min([source[i - 1][j], source[i + 1][j], source[i][j + 1]])) not in alls:
            return min([source[i - 1][j], source[i + 1][j], source[i][j + 1]])
    elif i == len(source) - 1:
        if location(alls, min([source[i - 1][j], source[i][j - 1], source[i][j + 1]])) not in alls:
            return min([source[i - 1][j], source[i][j - 1], source[i][j + 1]])
    elif j == len(source[0]) - 1:
        if location(alls, min([source[i - 1][j], source[i + 1][j], source[i][j - 1]])) not in alls:
            return min([source[i - 1][j], source[i + 1][j], source[i][j - 1]])
    else:
        if location(alls, min([source[i - 1][j], source[i + 1][j], source[i][j - 1], source[i][j + 1]])) not in alls:
            return min([source[i - 1][j], source[i + 1][j], source[i][j - 1], source[i][j + 1]])
# Driver: repeatedly expand the set of claimed cells (`alls`) from (0, 0)
# by taking the cheapest frontier neighbour until the bottom-right value
# has been collected, then print the collected values.
# NOTE(review): eval() on raw input is unsafe for untrusted data —
# ast.literal_eval would be the safe equivalent here.
grid = eval(input())
alls = []
alls.append([0, 0])
result = []
result.append(grid[0][0])
while True:
    possiable = []
    print(alls)  # debug trace of claimed coordinates, kept from the original
    for a in alls:
        possiable.append(find(grid, a[0], a[1], alls))
    result.append(min(possiable))
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            if grid[i][j] in possiable:
                alls.append([i, j])
    if grid[len(grid) - 1][len(grid[0]) - 1] in result:
        break
# BUG FIX: the original printed `resullt`, an undefined name (NameError).
print(result)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
1e3d6016c25d1b2a8f583b4da27b2234c064d97e
|
d08b0a2ea1365e96c2143a3076d6f1cfce178321
|
/learnPython-master/news/ithome/demo.py
|
ab355ef2de9b7770e0ebab59189751875d274e5d
|
[] |
no_license
|
xueyes/py3_study
|
f64060e5dbfcbf11c8d61de8561ce90bbb4e3c19
|
a7d83b58ef95806f061f375952db604afe98bc13
|
refs/heads/master
| 2022-12-11T05:56:03.540612
| 2019-05-06T13:07:55
| 2019-05-06T13:07:55
| 162,883,421
| 1
| 0
| null | 2022-12-08T02:28:21
| 2018-12-23T11:02:31
|
HTML
|
UTF-8
|
Python
| false
| false
| 381
|
py
|
from lxml import etree
import requests
# Demo: fetch one page of comment data for IThome news item 326707 through
# the site's AJAX endpoint and dump the raw response body.
url = 'https://dyn.ithome.com/ithome/getajaxdata.aspx'
data = dict(
    newsID='326707',
    hash='A379730C87B522EA',
    type='commentpage',
    page='3',
    order='false',
)
response = requests.post(url, data=data)
html = response.text
print(html)
|
[
"1401354763@qq.com"
] |
1401354763@qq.com
|
64e61e45e0a348bd5d50ae86d13b5f1dd0a34442
|
484a348682d9fa515666b94a5cd3a13b1b725a9e
|
/Data Structure/考试.py
|
00a3ea41fd9be666a23b1b977479e92d1ce53653
|
[] |
no_license
|
joseph-mutu/Codes-of-Algorithms-and-Data-Structure
|
1a73772825c3895419d86d6f1f506d58617f3ff0
|
d62591683d0e2a14c72cdc64ae1a36532c3b33db
|
refs/heads/master
| 2020-12-29T17:01:55.097518
| 2020-04-15T19:25:43
| 2020-04-15T19:25:43
| 238,677,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
#include <stdio.h>
#include <stdlib.h>
using namespace std;
int N;
// Binary-tree node: integer payload plus left/right child pointers.
typedef struct BiTree
{
    int Data;
    struct BiTree* left, *right;
}BiTree;
// Rebuild a subtree from data1, consumed back-to-front through the shared
// index `ind` (consistent with a postorder sequence, whose last entry is
// the root), and data2[start..ends], which is searched for that root (so
// it is the matching inorder sequence).
// The right subtree is built BEFORE the left one on purpose: `--ind`
// walks data1 from the end, so the right subtree's nodes come first.
// NOTE(review): nodes are malloc'd and never freed.
BiTree* CreatTree(int data1[], int data2[], int start, int ends, int &ind)
{
    int mid = data1[ind];
    int i;
    // Locate the root value inside the inorder range.
    for(i = ends; i >= start; i--)
    {
        if(mid == data2[i])
            break;
    }
    BiTree *root = (BiTree*)malloc(sizeof(BiTree));
    root->Data = mid, root->right = NULL, root->left = NULL;
    if(i + 1 <= ends)
    {
        root->right = CreatTree(data1, data2, i + 1, ends, --ind);
    }
    if(i - 1 >= start)
    {
        root->left = CreatTree(data1, data2, start, i - 1, --ind);
    }
    return root;
}
void Preorder(BiTree* key)
{
    /* Print the subtree rooted at `key` in pre-order (root, left, right);
       each value is preceded by a single space. */
    if (key == NULL)
        return;
    printf(" %d", key->Data);
    Preorder(key->left);
    Preorder(key->right);
}
int main()
{
    // Read N, then two N-element sequences, rebuild the binary tree and
    // print its preorder traversal.
    scanf("%d", &N);
    int K1[100], K2[100];// K1: appears to be the postorder sequence, K2 the inorder one (see CreatTree)
    int i;
    for(i = 0; i < N; i++)
    {
        int n;
        scanf("%d", &n);
        K1[i] = n;
    }
    for(i = 0; i < N; i++)
    {
        int n;
        scanf("%d", &n);
        K2[i] = n;
    }
    BiTree *One;
    int k = N - 1;  // start at the last postorder entry (the root)
    One = CreatTree(K1, K2, 0, N - 1, k);// build the binary tree
    printf("Preorder:");
    Preorder(One);// traverse the binary tree
    printf("\n");
    return 0;
}
|
[
"josephmathone@gmail.com"
] |
josephmathone@gmail.com
|
5b7db5c2f10832e93c05e4accf948b198fd38fc5
|
5c4515960dcbfd3861d06d90b8c9bde0bdf3ecf5
|
/Iserlab/migrations/0044_auto_20170306_0956.py
|
649dec546e754ed99d60b1d6b99ffde77ce3b431
|
[] |
no_license
|
Mathilda1992/mcysite
|
66bb2f51de622b7f7c450664c798eb11ce195cae
|
def82e43474ecc734c6cbb26842bd87f698b2b88
|
refs/heads/master
| 2021-01-11T19:58:23.611196
| 2017-06-26T08:58:11
| 2017-06-26T08:58:11
| 79,434,975
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-03-06 09:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Score.score optional (nullable and blankable)."""
    dependencies = [
        ('Iserlab', '0043_score_socre_time'),
    ]
    operations = [
        migrations.AlterField(
            model_name='score',
            name='score',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
[
"machenyi2011@163.com"
] |
machenyi2011@163.com
|
ce924970e5c11232b741a9273bf79d4aec02ecf8
|
42ed6d4e67172522f79ab6f3c8cb650f4234be90
|
/grpc/client/main.py
|
55d9fa17b36fef38b4fffb7106dd8f1ebf9684f2
|
[] |
no_license
|
Hylan129/Self-Learning
|
81a5143015850c33d5226c4da43d110150661dc7
|
06ccdc202f62629395900658909e1363a32632fd
|
refs/heads/master
| 2020-06-12T19:45:50.479677
| 2020-05-26T15:38:26
| 2020-05-26T15:38:26
| 194,405,633
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import grpc
from example import data_pb2, data_pb2_grpc
_HOST = 'localhost'
_PORT = '8080'
def run():
    """Open an insecure gRPC channel to the local FormatData service,
    send one DoFormat request and print the reply text."""
    target = _HOST + ':' + _PORT
    channel = grpc.insecure_channel(target)
    stub = data_pb2_grpc.FormatDataStub(channel=channel)
    reply = stub.DoFormat(data_pb2.Data(text='hello,world!'))
    print("received: " + reply.text)


if __name__ == '__main__':
    run()
|
[
"jyzyg129@163.com"
] |
jyzyg129@163.com
|
b4953030c41f9a1add974681fbeb2f552d932ccf
|
beb4d7c16ea8d8da9747b94298891cf01d9466f6
|
/users/urls.py
|
c7b3b09544316033c571df99cedc218e2fa6a055
|
[] |
no_license
|
Chirag-Django/nonstopio_assignment
|
48985445a19f8d01c1f0565e8058cd032942d903
|
b1f1561e841857ea64d9a5170974704a347cc0e3
|
refs/heads/master
| 2023-03-05T07:05:49.963018
| 2021-02-20T18:35:29
| 2021-02-20T18:35:29
| 340,347,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
# Routes for the `users` app: registration, profile update, and the stock
# auth login/logout views rendered with project-local templates.
app_name='users'
urlpatterns = [
    path('register/', views.register, name='register'),
    path('update_profile/',views.update_profile,name='update_profile'),
    path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
    path('logout/',auth_views.LogoutView.as_view(template_name='logout.html'),name='logout'),
]
|
[
"chirag.django@gmail.com"
] |
chirag.django@gmail.com
|
ba0317152df2c5141cbc08cfba662a6ad4943890
|
e7c1f3af60c030b97916e4f431172ebdbc07b2a9
|
/django_mindscape/tests/test_dependencies.py
|
c02066d0898bbc0482536e51e8faff115d00fe83
|
[] |
no_license
|
podhmo/django-mindscape
|
d1df56c217750fee3cf3c24a49591bf98bbb6e89
|
2da92e155b7aaf465d631258f9799eb2e3d59671
|
refs/heads/master
| 2016-08-03T12:04:42.090545
| 2015-03-10T19:12:14
| 2015-03-10T19:12:14
| 31,464,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,128
|
py
|
# -*- coding:utf-8 -*-
import unittest
from evilunit import test_target
@test_target("django_mindscape:Walker")
class ForeignKeyTests(unittest.TestCase):
    """Walker over a plain ForeignKey (Member -> Group): dependency
    direction, relation type "M1", accessor name and reverse accessor.

    NOTE(review): `_makeOne` is presumably injected by the @test_target
    decorator (evilunit) — verify against that package.
    """
    @classmethod
    def setUpClass(cls):
        # Models are declared here (not at module level) so each test class
        # registers its own app_label-scoped models.
        from django.db import models
        class Group(models.Model):
            name = models.CharField(max_length=255, null=False, default="")
            class Meta:
                app_label = "myapp"
        class Member(models.Model):
            group = models.ForeignKey(Group)
            name = models.CharField(max_length=255, null=False, default="")
            class Meta:
                app_label = "myapp"
        cls.Group = Group
        cls.Member = Member
    def _get_models(self):
        return [self.Group, self.Member]
    def test_dependecies__member(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        member_dependencies = [node.to.model for node in walker[self.Member].dependencies]
        self.assertEqual(member_dependencies, [self.Group])
    def test_dependecies__group(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        self.assertEqual(walker[self.Group].dependencies, [])
    def test_relation_type(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        reltype = walker[self.Member].dependencies[0].type
        self.assertEqual(reltype, "M1")
    def test_relation_name(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        name = walker[self.Member].dependencies[0].name
        self.assertEqual(name, "group")
    def test_relation_backref(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        backref = walker[self.Member].dependencies[0].backref
        self.assertEqual(backref, "member_set")
@test_target("django_mindscape:Walker")
class RelatedNameTests(unittest.TestCase):
    """Walker over a ForeignKey with related_name="+": the forward name is
    kept but the reverse accessor (backref) must be None."""
    @classmethod
    def setUpClass(cls):
        from django.db import models
        class Group(models.Model):
            name = models.CharField(max_length=255, null=False, default="")
            class Meta:
                app_label = "myappp"
        class Member(models.Model):
            group = models.ForeignKey(Group, related_name="+")
            name = models.CharField(max_length=255, null=False, default="")
            class Meta:
                app_label = "myappp"
        cls.Group = Group
        cls.Member = Member
    def _get_models(self):
        return [self.Group, self.Member]
    def test_relation_name(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        name = walker[self.Member].dependencies[0].name
        self.assertEqual(name, "group")
    def test_relation_backref(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        backref = walker[self.Member].dependencies[0].backref
        self.assertEqual(backref, None)
@test_target("django_mindscape:Walker")
class ManyToManyTests(unittest.TestCase):
    """Walker over a ManyToMany with an explicit through model: relation
    type "MM", accessor names, and the through-model reference."""
    @classmethod
    def setUpClass(cls):
        from django.db import models
        class Group(models.Model):
            name = models.CharField(max_length=255, null=False, default="")
            class Meta:
                app_label = "myapp2"
        class Member(models.Model):
            group_set = models.ManyToManyField(Group, through="GroupToMember")
            name = models.CharField(max_length=255, null=False, default="")
            class Meta:
                app_label = "myapp2"
        class GroupToMember(models.Model):
            member = models.ForeignKey(Member)
            group = models.ForeignKey(Group)
            class Meta:
                app_label = "myapp2"
        cls.Group = Group
        cls.Member = Member
        cls.GroupToMember = GroupToMember
    def _get_models(self):
        return [self.Group, self.Member]
    def test_relation_type(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        reltype = walker[self.Member].dependencies[0].type
        self.assertEqual(reltype, "MM")
    def test_relation_name(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        name = walker[self.Member].dependencies[0].name
        self.assertEqual(name, "group_set")
    def test_relation_backref(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        backref = walker[self.Member].dependencies[0].backref
        self.assertEqual(backref, "member_set")
    def test_relation_through(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        through = walker[self.Member].dependencies[0].through
        self.assertEqual(through.model, self.GroupToMember)
@test_target("django_mindscape:Walker")
class OneToOneTests(unittest.TestCase):
    """Walker over a OneToOneField: relation type "11" and a singular
    (non-_set) reverse accessor."""
    @classmethod
    def setUpClass(cls):
        from django.db import models
        class Group(models.Model):
            name = models.CharField(max_length=255, null=False, default="")
            class Meta:
                app_label = "myapp3"
        class Member(models.Model):
            group = models.OneToOneField(Group)
            name = models.CharField(max_length=255, null=False, default="")
            class Meta:
                app_label = "myapp3"
        cls.Group = Group
        cls.Member = Member
    def _get_models(self):
        return [self.Group, self.Member]
    def test_relation_type(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        reltype = walker[self.Member].dependencies[0].type
        self.assertEqual(reltype, "11")
    def test_relation_name(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        name = walker[self.Member].dependencies[0].name
        self.assertEqual(name, "group")
    def test_relation_backref(self):
        walker = self._makeOne(self._get_models())
        walker.walkall()
        backref = walker[self.Member].dependencies[0].backref
        self.assertEqual(backref, "member")
|
[
"podhmo+altair@beproud.jp"
] |
podhmo+altair@beproud.jp
|
aef95b48318caf1b38584776aca1d4f9936f4928
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_2_1_neat/16_2_1_Math_getting_digit.py
|
ba10364f80fe35b453998f9f7dfea738adce6858
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,233
|
py
|
#!/usr/bin/python3
import sys
def remove_letters(letters, word, ref_letter):
    """If *ref_letter* still has a positive count in the letter-count dict,
    subtract one whole *word* per remaining occurrence of it and return that
    occurrence count; otherwise return 0.  Mutates *letters* in place."""
    if (ref_letter in letters and letters[ref_letter] > 0):
        number = letters[ref_letter]
        for ll in word:
            letters[ll] -= number
        return(number)
    else:
        return(0)


def solve_number(SS, case):
    """Decode the scrambled digit-words in SS and print 'Case #<case>: <digits>'.

    Digits are peeled off in an order whose reference letter is unique at
    that point: Z->0, W->2, G->8, X->6, S->7, V->5, F->4, I->9, N->1, T->3.
    BUG FIX: the word for 8 was misspelled 'HEIGHT'; the stray extra 'H'
    double-decremented the H count (latent corruption of the running
    letter tallies) — the correct word is 'EIGHT'.
    """
    # Count each letter of the input.
    letters = dict()
    for ll in SS:
        if (not ll in letters):
            letters[ll] = 1
        else:
            letters[ll] += 1
    numbers = [0 for ii in range(10)]
    numbers[0] = remove_letters(letters, 'ZERO', 'Z')
    numbers[2] = remove_letters(letters, 'TWO', 'W')
    numbers[8] = remove_letters(letters, 'EIGHT', 'G')
    numbers[6] = remove_letters(letters, 'SIX', 'X')
    numbers[7] = remove_letters(letters, 'SEVEN', 'S')
    numbers[5] = remove_letters(letters, 'FIVE', 'V')
    numbers[4] = remove_letters(letters, 'FOUR', 'F')
    numbers[9] = remove_letters(letters, 'NINE', 'I')
    numbers[1] = remove_letters(letters, 'ONE', 'N')
    numbers[3] = remove_letters(letters, 'THREE', 'T')
    print("Case #" + str(case) + ": " + ''.join([ str(ii) for ii in range(10) for jj in range(numbers[ii]) ]))
# Read the number of cases, then decode each scrambled-letters line.
TT = int(input())
for ii in range(TT):
    SS = input()
    solve_number(SS, ii+1)
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
7458e345152467d791ea25abac7dba7b3fe448a1
|
bc6ebddfd13b36d5e394f7b5eb15444f7467f5e5
|
/sundial/utils.py
|
2670fc6d9ad15404fea7729d884697854ba0c8d8
|
[
"MIT"
] |
permissive
|
Surgo/django-sundial
|
28bbe8da723ca45fe926750e78ae568b1c5f0092
|
e6a3f69d61a49a5d7ae2b053cdd79289e11a8a73
|
refs/heads/master
| 2021-01-17T06:09:01.327135
| 2015-03-12T09:13:12
| 2015-03-12T09:13:12
| 32,067,215
| 0
| 0
| null | 2015-03-12T09:01:48
| 2015-03-12T09:01:48
|
Python
|
UTF-8
|
Python
| false
| false
| 709
|
py
|
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.conf import settings
from django.utils.text import force_text
from django.utils.translation import ugettext_lazy as _
import pytz
# Session key under which the active timezone name is stored; overridable
# through settings.SUNDIAL_TIMEZONE_SESSION_KEY.
TIMEZONE_SESSION_KEY = getattr(settings, 'SUNDIAL_TIMEZONE_SESSION_KEY', '_timezone')


def set_session_timezone(session, zone):
    """Persist *zone* (stringified) into the session."""
    session[TIMEZONE_SESSION_KEY] = force_text(zone)


def get_session_timezone(session):
    """Return the stored timezone name, or None when unset."""
    return session.get(TIMEZONE_SESSION_KEY)


def coerce_timezone(zone):
    """Resolve a timezone name to a pytz tzinfo, raising ValidationError
    for unknown names."""
    try:
        tz = pytz.timezone(zone)
    except pytz.UnknownTimeZoneError:
        raise ValidationError(
            _('Unknown timezone.'), code='invalid'
        )
    return tz
|
[
"charette.s@gmail.com"
] |
charette.s@gmail.com
|
27cfac09bd2fc5432cc9463f81553242ffb3e67f
|
0de5fbd2c992388d572bfb4c114f82741a351b8e
|
/0x06-Basic_authentication/api/v1/auth/auth.py
|
55326846343b39f19a21e2e490782c8f2570f96d
|
[] |
no_license
|
veromejia/holbertonschool-web_back_end
|
3fac1ad905d02bb6d351092210d005a211053df4
|
44258bb53104da95c6df8a69dd066233cb0daae0
|
refs/heads/main
| 2023-02-10T01:49:31.439587
| 2021-01-05T00:44:46
| 2021-01-05T00:44:46
| 305,465,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
#!/usr/bin/env python3
""" Define Auth class """
from flask import request
from typing import List, TypeVar
class Auth:
    """Base class for API authentication: decides which paths require
    auth, extracts the Authorization header, and resolves the current
    user (the base implementation resolves nobody)."""

    def require_auth(self, path: str, excluded_paths: List[str]) -> bool:
        """Return True when *path* requires authentication, i.e. it is not
        covered by *excluded_paths* (exact, slash-insensitive matches, or
        entries ending in '*' treated as prefixes)."""
        if path is None or excluded_paths is None:
            return True
        # Normalise to a trailing slash so '/api' and '/api/' compare equal.
        # BUG FIX: the original used `path[-1] is not '/'` — identity
        # comparison against a str literal (a CPython implementation
        # detail) — and crashed with IndexError on an empty path.
        if not path.endswith('/'):
            path += '/'
        # Entries ending in '*' act as prefixes (the guard on `p` also
        # avoids an IndexError on an empty excluded entry).
        wildcards = [p[:-1] for p in excluded_paths if p and p[-1] == '*']
        for prefix in wildcards:
            if path.startswith(prefix):
                return False
        return path not in excluded_paths

    def authorization_header(self, request=None) -> str:
        """Return the request's Authorization header, or None."""
        if request is None or 'Authorization' not in request.headers:
            return None
        return request.headers.get('Authorization')

    def current_user(self, request=None) -> TypeVar('User'):
        """Return the current user (always None in this base class)."""
|
[
"veromejia.q@gmail.com"
] |
veromejia.q@gmail.com
|
626e573ed0bbb8e22c948eeb92c9e96d8ffb4782
|
be6e6d8af85adf044bf79676b7276c252407e010
|
/spec/construct/test_switch_manual_enum_invalid.py
|
66cd2a7da5b985e6bb318912bb2ad2cc7e62bf3c
|
[
"MIT"
] |
permissive
|
kaitai-io/kaitai_struct_tests
|
516e864d29d1eccc5fe0360d1b111af7a5d3ad2b
|
3d8a6c00c6bac81ac26cf1a87ca84ec54bf1078d
|
refs/heads/master
| 2023-08-19T19:42:47.281953
| 2023-08-04T20:26:50
| 2023-08-04T20:26:50
| 52,155,797
| 12
| 41
|
MIT
| 2023-07-30T23:30:30
| 2016-02-20T13:55:39
|
Ruby
|
UTF-8
|
Python
| false
| false
| 531
|
py
|
# Autogenerated from KST: please remove this line if doing any edits by hand!
import unittest
from switch_manual_enum_invalid import _schema
class TestSwitchManualEnumInvalid(unittest.TestCase):
    """Generated from a KST spec: parsing src/enum_negative.bin must yield
    two opcodes whose codes (255 and 1) map to no enum body (body is None)."""
    def test_switch_manual_enum_invalid(self):
        r = _schema.parse_file('src/enum_negative.bin')
        self.assertEqual(len(r.opcodes), 2)
        self.assertEqual(r.opcodes[0].code, 255)
        self.assertIsNone(r.opcodes[0].body)
        self.assertEqual(r.opcodes[1].code, 1)
        self.assertIsNone(r.opcodes[1].body)
|
[
"petr.pucil@seznam.cz"
] |
petr.pucil@seznam.cz
|
f71da1fd302633769fc6a1663d9ca71769b093c6
|
eddbfe4eb1aa3052cb8d03097cca2673ae207ec0
|
/books/migrations/0003_auto_20210429_1839.py
|
63f82e9831d65f0c47c2a14623a7f4bae6ac1eab
|
[] |
no_license
|
luiz158/CRUD_Django
|
f40d630f09916a0cd2d06a076415873122c02098
|
852242b34c7ff4781c9704df032865e83ded9bf3
|
refs/heads/master
| 2023-07-02T08:58:45.637229
| 2021-08-04T21:57:03
| 2021-08-04T21:57:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
# Generated by Django 3.2 on 2021-04-29 18:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Recreate Book.author as a ForeignKey to books.Author: the old field
    is dropped first, then re-added with CASCADE deletion."""
    dependencies = [
        ('books', '0002_alter_book_date_inclusion'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='book',
            name='author',
        ),
        migrations.AddField(
            model_name='book',
            name='author',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='books.author'),
        ),
    ]
|
[
"jonathan.mothe@gmail.com"
] |
jonathan.mothe@gmail.com
|
4efd9e8caa0fa82f83d43e5d2702eae94ce0a814
|
2db7597686f33a0d700f7082e15fa41f830a45f0
|
/Python/BinaryTree/1448. 统计二叉树中好节点的数目.py
|
f1622e85dcf75ab740d30faccdc8934d48e9b3c6
|
[] |
no_license
|
Leahxuliu/Data-Structure-And-Algorithm
|
04e0fc80cd3bb742348fd521a62bc2126879a70e
|
56047a5058c6a20b356ab20e52eacb425ad45762
|
refs/heads/master
| 2021-07-12T23:54:17.785533
| 2021-05-17T02:04:41
| 2021-05-17T02:04:41
| 246,514,421
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 836
|
py
|
'''
1448. 统计二叉树中好节点的数目
给你一棵根为 root 的二叉树,请你返回二叉树中好节点的数目。
「好节点」X 定义为:从根到该节点 X 所经过的节点中,没有任何节点的值大于 X 的值。
'''
# Definition for a BT node
# Definition for a BT node
class TreeNode:
    """Plain binary-tree node."""
    def __init__(self, x = 0):
        self.val = x
        self.left = None
        self.right = None


def goodNodes(root):
    """Count the 'good' nodes of the tree rooted at *root*: a node X is
    good when no node on the root-to-X path has a value greater than X's
    (equivalently, X.val >= max value seen along the path).

    BUG FIX: the original referenced `self.res` inside a plain function,
    which raises NameError; a nonlocal counter replaces it.
    """
    if root is None:
        return 0
    count = 0

    def dfs(node, path_max):
        # path_max is the largest value seen on the path down to `node`.
        nonlocal count
        if node is None:
            return
        if node.val >= path_max:
            count += 1
            path_max = node.val
        dfs(node.left, path_max)
        dfs(node.right, path_max)

    dfs(root, float('-inf'))
    return count
|
[
"leahxuliu@gmail.com"
] |
leahxuliu@gmail.com
|
3f9a50bca0bdd49ad832b81e37cc2acb7ef94337
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/aQWEQDoWiNbryG8xs_13.py
|
74b473ad6cce9d4cfa59205686035d2b90514e34
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
"""
Create a function that takes a number `num` and returns the first 10 multiples
of `num` with 1 added to it, separated by commas.
### Examples
n_tables_plus_one(7) ➞ "8,15,22,29,36,43,50,57,64,71"
n_tables_plus_one(1) ➞ "2,3,4,5,6,7,8,9,10,11"
n_tables_plus_one(3) ➞ "4,7,10,13,16,19,22,25,28,31"
### Notes
There is no comma after the last number.
"""
def n_tables_plus_one(num):
Final = ''
for item in range(1,10+1):
Final += str((num*item)+1)+','
return Final[:-1]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
6cb50044646a8ed42437fa9dd67f027154040955
|
241e8be8fbd310384e5669b64142cf173ecf692b
|
/mickey/admin.py
|
65504f3ffe946c80f308fdd06f1d942f53fbe379
|
[
"MIT"
] |
permissive
|
raihanba13/mickey
|
a1e61cf71419b61acfc2240336c8a8cebf712ceb
|
6a151145d11687760eae14749b7ee150c9d5a044
|
refs/heads/master
| 2022-06-10T03:22:18.869733
| 2020-05-07T11:31:20
| 2020-05-07T11:31:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,993
|
py
|
from django.contrib import admin
from django.utils.html import format_html
from django.contrib import messages
from django.contrib.contenttypes.admin import GenericTabularInline
from mickey.models import *
from mickey.widgets import *
import logging
logger = logging.getLogger(__name__)
# Register your models here.
def get_message_bit(rows_updated, model_name):
    """Return a human-readable count fragment for admin messages, e.g.
    "1 post was" or "3 posts were".

    BUG FIX: an unrecognised *model_name* previously fell through every
    branch and raised UnboundLocalError; it now raises ValueError.
    """
    singular = {
        "category": "1 category was",
        "post": "1 post was",
        "tag": "1 tag was",
        "comment": "1 comment was",
    }
    plural = {
        "category": "%s categories were",
        "post": "%s posts were",
        "tag": "%s tags were",
        # NOTE(review): original wording kept; "comments were" is likely intended.
        "comment": "%s comment were",
    }
    if model_name not in singular:
        raise ValueError("unknown model_name: %r" % (model_name,))
    if rows_updated == 1:
        return singular[model_name]
    return plural[model_name] % rows_updated
class PostAdminForm(forms.ModelForm):
    """Admin form for Post that renders the content fields with the
    project's HTML editor widget (mickey.widgets.HtmlEditor)."""
    # NOTE(review): `model` normally belongs inside Meta for a ModelForm;
    # here it sits on the class body — confirm this is intentional.
    model = Post
    class Meta:
        fields = '__all__'
        widgets = {
            'content': HtmlEditor(attrs={'style': 'width: 90%; height: 100%;'}),
            'short_content': HtmlEditor(attrs={'style': 'width: 90%; height: 100%;'}),
        }
@admin.register(Media)
class MediaAdmin(admin.ModelAdmin):
    """Admin for uploaded media; shows an inline preview of the original
    image (image_tag) and exposes the derived md/sm sizes as optional."""
    list_display = ("id", "image","md_image","sm_image", "created_by", "created_at")
    fieldsets = (
        ("Required Information", {
            "description": "These fields are required for each Media",
            "fields": (
                ('image', 'image_tag'),
            ),
        }),
        ("Optional Information", {
            'classes': ('collapse',),
            'fields': (
                ('md_image','sm_image'),
            )
        })
    )
    readonly_fields = ('image_tag',)
    def image_tag(self, obj):
        # Render the stored image as an <img> preview in the change form.
        # NOTE(review): logging the URL at CRITICAL level looks like
        # leftover debugging — consider logger.debug.
        logger.critical(obj.image.url)
        return format_html('<img src="{}" width="160" height="135"/>'.format(obj.image.url))
    image_tag.short_description = 'Image'
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Admin for categories with bulk activate/deactivate actions."""
    fields = (('parent','name'),'active')
    list_display = ("id", "name", "parent", "active" ,"created_by")
    actions = ['make_category_active', 'make_category_deactivate']

    def make_category_active(self, request, queryset):
        """Bulk action: mark the selected categories active."""
        rows_updated = queryset.update(active=True)
        # Consistency: reuse the module-level get_message_bit() helper (as
        # the other admins do) instead of duplicating the pluralisation.
        self.message_user(request, "%s activated successfully." % get_message_bit(rows_updated, 'category'))

    def make_category_deactivate(self, request, queryset):
        """Bulk action: mark the selected categories inactive."""
        rows_updated = queryset.update(active=False)
        self.message_user(request, "%s deactivated successfully." % get_message_bit(rows_updated, 'category'))

    make_category_active.short_description = "Active selected categories"
    make_category_deactivate.short_description = "Deactivate selected categories"
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
    """Plain list admin for tags."""
    list_display = ("id", "name", "created_by")
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Admin for blog posts: HTML-editor form, bulk publish/unpublish and
    archive/unarchive actions, and a cover-image preview."""
    form = PostAdminForm
    list_display = ("title", "category", "published", "archive", "created_by", "created_at")
    search_fields = ['title','category__name','published']
    list_filter = ('category__name', 'published', 'archive','created_at')
    fieldsets = (
        ("Required Information", {
            "description": "These fields are required for each post",
            "fields": (
                ('category', 'title'), ('content',), ('tags',)
            ),
        }),
        ("Optional Information", {
            'classes': ('collapse',),
            'fields': (
                ('cover_image','image_tag'),
                ('published','archive',),
                ('short_content',),
                ('slug',)
            )
        })
    )
    actions = ['make_archive','remove_archive','publish_post','unpublish_post']
    readonly_fields = ('image_tag',)

    def make_archive(self, request, queryset):
        """Bulk action: mark the selected posts as archived."""
        rows_updated = queryset.update(archive=True)
        self.message_user(request, "%s archived successfully." % get_message_bit(rows_updated,'post'))

    def remove_archive(self, request, queryset):
        """Bulk action: take the selected posts out of the archive."""
        rows_updated = queryset.update(archive=False)
        self.message_user(request, "%s published from archive successfully." % get_message_bit(rows_updated,'post'))

    def unpublish_post(self, request, queryset):
        """Bulk action: unpublish the selected posts."""
        rows_updated = queryset.update(published=False)
        self.message_user(request, "%s unpublished successfully." % get_message_bit(rows_updated,'post'))

    def publish_post(self, request, queryset):
        """Bulk action: publish the selected posts."""
        rows_updated = queryset.update(published=True)
        self.message_user(request, "%s published successfully." % get_message_bit(rows_updated,'post'))

    def image_tag(self, obj):
        """Render the cover image as an <img> preview; empty string when
        the post has no usable cover image."""
        # BUG FIX: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; catch Exception (missing cover_image / file
        # with no URL) instead.
        try:
            return format_html('<img src="{}" width="160" height="135"/>'.format(obj.cover_image.image.url))
        except Exception:
            return ""

    image_tag.short_description = 'Post Image'
    make_archive.short_description = "Archive selected post"
    remove_archive.short_description = "Publish selected post from archive"
    publish_post.short_description = "Publish selected post"
    unpublish_post.short_description = "Unpublish selected post"
@admin.register(React)
class ReactAdmin(admin.ModelAdmin):
    """Listing of reaction counts per blog post."""
    list_display = ("id","blog", "type", "amount",)
    search_fields = ['blog__title', 'type']
    list_filter = ('blog', 'type',)
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin for comments with bulk activate/deactivate moderation actions."""
    list_display = ("id","post", "parent", "name", "active", "created_at")
    fields = (('post','parent'),('name', ), 'body', ('active',))
    list_filter = ('post', 'name', 'active')
    actions = ['make_activate','make_deactivate',]
    def make_activate(self, request, queryset):
        # Bulk action: make the selected comments visible.
        rows_updated = queryset.update(active=True)
        self.message_user(request, "%s activated successfully." % get_message_bit(rows_updated,'comment'))
    def make_deactivate(self, request, queryset):
        # Bulk action: hide the selected comments.
        rows_updated = queryset.update(active=False)
        self.message_user(request, "%s deactivated successfully." % get_message_bit(rows_updated,'comment'))
    make_activate.short_description = "Active selected comments"
    make_deactivate.short_description = "Deactivate selected comments"
@admin.register(SiteInformation)
class DJSiteAdmin(admin.ModelAdmin):
    """Admin for the site-wide information record (title, tagline, footer)."""
    list_display = ("title", "tagline", "created_by")
    fieldsets = (
        ("Required Information", {
            "description": "These fields are required for DJ Site Information",
            "fields": (
                ('title', ),
            ),
        }),
        ("Optional Information", {
            'classes': ('collapse',),
            'fields': (
                ('header_title','tagline',),
                'footer_text'
            )
        })
    )
|
[
"farhapartex@gmail.com"
] |
farhapartex@gmail.com
|
9f79014a99de26096629779af1c9279f8319b7b4
|
86813bf514f3e0257f92207f40a68443f08ee44b
|
/0072 编辑距离/0072 编辑距离.py
|
09a71367fdde0319dcd6e517d8d2183aa808a77f
|
[] |
no_license
|
Aurora-yuan/Leetcode_Python3
|
4ce56679b48862c87addc8cd870cdd525c9d926c
|
720bb530850febc2aa67a56a7a0b3a85ab37f415
|
refs/heads/master
| 2021-07-12T13:23:19.399155
| 2020-10-21T03:14:36
| 2020-10-21T03:14:36
| 212,998,500
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
# label: dynamic programming difficulty: difficult
class Solution(object):
    def minDistance(self, word1, word2):
        """
        :type word1: str
        :type word2: str
        :rtype: int
        """
        # Levenshtein edit distance, bottom-up: table[r][c] is the minimum
        # number of insert/replace/delete operations turning word1[:r]
        # into word2[:c].
        rows, cols = len(word1), len(word2)
        table = [[0] * (cols + 1) for _ in range(rows + 1)]
        # Base cases: transforming to/from the empty prefix.
        for r in range(rows + 1):
            table[r][0] = r
        for c in range(cols + 1):
            table[0][c] = c
        for r in range(1, rows + 1):
            for c in range(1, cols + 1):
                if word1[r - 1] == word2[c - 1]:
                    # Matching tail characters cost nothing extra.
                    table[r][c] = table[r - 1][c - 1]
                else:
                    # 1 + best of delete / replace / insert.
                    table[r][c] = 1 + min(table[r - 1][c],
                                          table[r - 1][c - 1],
                                          table[r][c - 1])
        return table[rows][cols]
|
[
"noreply@github.com"
] |
Aurora-yuan.noreply@github.com
|
7ae7e78b80d63d83fad51f24b644042cd5b26dc0
|
128c32834fa8156a25e5693131991525ea33020b
|
/2016.1/Exércicio LAB DE PROGRAMAÇÃO/Exercício 2016.1/Exemplos_Realizados_em_Sala/TabelaHashOficial.py
|
32d11b51e1e5f60f2130fc55a4f5649f77574158
|
[] |
no_license
|
wellington16/BSI-UFRPE
|
5780e94b4c10b3ee8885d01fc14f4050e6907611
|
268d0e5beabf211df1aa69cbe52ac1e0cb85fe64
|
refs/heads/master
| 2020-06-30T16:59:59.316415
| 2020-03-10T13:22:31
| 2020-03-10T13:22:31
| 66,642,156
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,527
|
py
|
class No:
    """Doubly-linked-list node: a value plus next (`prox`) and previous
    (`ant`) references."""

    def __init__(self, valor):
        self.valor = valor  # payload
        self.prox = None    # next node
        self.ant = None     # previous node

    def getValor(self):
        """Return the stored value."""
        return self.valor

    def setValor(self, novodado):
        """Replace the stored value.

        BUG FIX: the original body was `self.prox = novoDado` — a NameError
        (the parameter is spelled `novodado`) that, even if spelled right,
        would have clobbered the next-pointer instead of the value.
        """
        self.valor = novodado

    def getNovValor(self):
        """Return the next node (or None)."""
        return self.prox

    def setNovValor(self, novoNo):
        """Set the next node."""
        self.prox = novoNo

    def getAntValor(self):
        """Return the previous node (or None)."""
        return self.ant

    def setAntValor(self, novoNo):
        """Set the previous node."""
        self.ant = novoNo
class ListEncad:
    """Doubly linked list built from `No` nodes, tracking head and tail."""

    def __init__(self):
        self._inicio = None  # head
        self._fim = None     # tail

    # Verifica se a lista está vazia -> True when the list is empty.
    def listVazia(self):
        return (self._inicio is None) or (self._fim is None)

    def InserirNoInicio(self, valor):
        """Insert *valor* at the head."""
        NovoNo = No(valor)
        if self.listVazia():
            self._inicio = self._fim = NovoNo
        else:
            self._inicio.setAntValor(NovoNo)
            NovoNo.setNovValor(self._inicio)
            NovoNo.setAntValor(None)
            self._inicio = NovoNo

    def InserirNoFim(self, valor):
        """Insert *valor* at the tail."""
        NovoNo = No(valor)
        if self.listVazia():
            self._inicio = self._fim = NovoNo
        else:
            self._fim.setNovValor(NovoNo)
            NovoNo.setAntValor(self._fim)
            NovoNo.setNovValor(None)
            self._fim = NovoNo

    def pesquisar(self, valor):
        """Return *valor* if present, an error message string if absent,
        or None when the list is empty.

        BUG FIXES: the original walked past the tail and then called
        getValor() on None (AttributeError) before its not-found check
        could run, and it returned the bound method `NoAtual.getValor`
        instead of calling it.
        """
        if self.listVazia():
            return None
        NoAtual = self._inicio
        while NoAtual is not None and NoAtual.getValor() != valor:
            NoAtual = NoAtual.getNovValor()
        if NoAtual is None:
            return "Esse valor não foi encontrado!"
        return NoAtual.getValor()

    def __str__(self):
        """Space-separated values, head to tail.

        BUG FIXES: the original overwrote `texto` each iteration (so only
        the last value survived) and printed as a side effect of __str__.
        """
        if self.listVazia():
            return(" Este valor não existe.")
        texto = ''
        NoAtual = self._inicio
        while NoAtual is not None:
            texto += str(NoAtual.getValor()) + " "
            NoAtual = NoAtual.getNovValor()
        return texto

    def remover(self, valor):
        """Unlink the first node holding *valor*; return an error message
        string when the value is absent, None otherwise."""
        if self.listVazia():
            return None
        NoAtual = self._inicio
        # BUG FIX: guard against running off the end (AttributeError on
        # None.getValor()) when the value is not in the list.
        while NoAtual is not None and NoAtual.getValor() != valor:
            NoAtual = NoAtual.getNovValor()
        if NoAtual is None:
            return "O valor não está na lista"
        if self._inicio == self._fim:
            # Single-node list: removing it empties the list.
            self._inicio = self._fim = None
            return None
        elif NoAtual == self._inicio:
            aux = self._inicio.getNovValor()
            self._inicio.setNovValor(None)
            # BUG FIX: the original cleared aux's *next* pointer, detaching
            # the rest of the list; the new head's *previous* pointer is
            # what must be cleared.
            aux.setAntValor(None)
            self._inicio = aux
        elif NoAtual == self._fim:
            aux = self._fim.getAntValor()
            self._fim.setAntValor(None)
            aux.setNovValor(None)
            self._fim = aux
        else:
            # Interior node: bridge its neighbours around it.
            aux = NoAtual.getAntValor()
            aux2 = NoAtual.getNovValor()
            aux2.setAntValor(aux)
            aux.setNovValor(aux2)

    def esvaziarList(self):
        """Drop every node by resetting head and tail."""
        self._inicio = self._fim = None
class Pilha(ListEncad):
#Função remover no final da pilha
def desempilhar(self):
if self.listVazia():
return
else:
UltmValNo = self._fim.getValor()
if self._inicio is self._fim:
self._inicio = self.fim = None
else:
aux1 = self._fim.getAntValor()
self._fim.setAntValor(None)
aux1.setNovValor(None)
self._fim = aux1
return UltmValNo
class Fila(ListEncad):
#Função remover no inicio da fila
def removerInicio(self):
if self.listVazia():
return" A fila está vazia!"
else:
PrimValNo = self._inicio.getValor()
if self._inicio is self._fim:
self._inicio = self._fim = None
else:
aux2 = self._inicio.getNovValor()
self._inicio.setNovValor(None)
aux2.setAntValor(None)
self._inicio = aux2
return PrimValNo
class Item():
def __init__(self, chave, valor):
self._chave = chave
self._valor = valor
def __str__(self):
chav = self.getChave()
valor1 = self.getValor()
chav = str(chav)
valor1 = str(valor1)
elemt = "Chave = "+ chav +". O valor = "+ valor1+ "\n"
return elemt
def getChave(self):
return self._chave
def setChave(self, chave):
self._chave = chave
def getValor(self):
return self._valor
def setValor(self, valor):
self._valor = valor
class Hash:
def __init__(self, tamanho):
self.tamanho = tamanho
self._table = [None] * tamanho
def FuncHash(self, chave):
return chave % self.tamanho
def pesquisarItem(self, chave):
x = self.FuncHash(chave)
l = self._table[x]
if l == None:
return None
h = l._inicio
while h != None:
if h.getValor().getValor() == chave:
return h.getValor.getValor()
h = h.getNovValor()
return None
def inserir(self, chave, valor):
valorHash = self.FuncHash(chave)
#print(valorHash)
item = Item(chave,valor)
if (self._table[valorHash] == None):
listx = ListEncad()
listx.InserirNoInicio(item)
self._table[valorHash]= listx
else:
self._table[valorHash].InserirNoInicio(item)
def delete(self, chave):
v = self.listar(chave)
if v != "Nao Existe":
g = v._inicio
while (g != None):
if g.getValor().getChave() == chave:
if v._inicio != v._fim:
if g == v._inicio:
p = v._inicio.getNovValor()
p.setAntValor(None)
v._inicio = p
elif g == v._fim:
a = v._fim.getAntValor()
a.setNovValor(None)
v._fim = a
else:
a = g.getAntValor()
p = g.getNovValor()
p.setAntValor(a)
a.setNovValor(p)
else:
v._inicio = None
v._fim = None
g = g.getNovValor()
else:
return ("Não existe esse elemento na tabela")
def listar(self, chave):
valorHash = self.FuncHash(chave)
if self._table[valorHash] != None:
return self._table[valorHash]
else:
return 0
def __str__(self):
textox = ''
for x in self._table:
if x == None:
pass
else:
textox += str(x.__str__() + "\n")
return textox
novatabelinha = Hash(5)
novatabelinha.inserir(1, 45)
novatabelinha.inserir(3, 67)
novatabelinha.inserir(5, 5)
novatabelinha.inserir(2, 44)
#print(novatabelinha)
novatabelinha.listar(5)
#novatabelinha.delete(1)
novatabelinha.pesquisarItem(2)
print(novatabelinha)
|
[
"wjfilmagens@hotmail.com"
] |
wjfilmagens@hotmail.com
|
b5e65556cb0df5cb435e365882c7a0da7fe6731e
|
ac1bbabc7c1b3149711c416dd8b5f5969a0dbd04
|
/Python Advanced/comprehensions/heroes_inventory.py
|
1ae5e593c8b94b15f8fb01e9809e0e0be37b5b93
|
[] |
no_license
|
AssiaHristova/SoftUni-Software-Engineering
|
9e904221e50cad5b6c7953c81bc8b3b23c1e8d24
|
d4910098ed5aa19770d30a7d9cdf49f9aeaea165
|
refs/heads/main
| 2023-07-04T04:47:00.524677
| 2021-08-08T23:31:51
| 2021-08-08T23:31:51
| 324,847,727
| 1
| 0
| null | 2021-08-08T23:31:52
| 2020-12-27T20:58:01
|
Python
|
UTF-8
|
Python
| false
| false
| 634
|
py
|
heroes = input().split(', ')
command = input()
heroes_inventory = {hero: [] for hero in heroes}
while not command == "End":
data = command.split('-')
name, item, cost = data
if name in heroes_inventory:
if heroes_inventory[name]:
if item not in heroes_inventory[name][0]:
heroes_inventory[name][0].append(item)
heroes_inventory[name][1].append(int(cost))
else:
heroes_inventory[name] = [[item], [int(cost)]]
command = input()
for name, [item, cost] in heroes_inventory.items():
print(f"{name} -> Items: {len(item)}, Cost: {sum(cost)}")
|
[
"assiaphristova@gmail.com"
] |
assiaphristova@gmail.com
|
27f120ae877d4f79cb6762956b3002c61edeb0ca
|
bde402f8375dc12f1a337d534e4ed217023fd1d2
|
/setup.py
|
18226d5ce7c5d447cabe839a450b0e052abc3db6
|
[] |
no_license
|
CONNJUR/nmrglue
|
975d386a5128db6904041a57f833b34980ec9170
|
9ee6d6278d1d2be87648bb4903f3948fb6447da1
|
refs/heads/master
| 2020-04-20T16:52:19.143719
| 2019-02-03T17:51:44
| 2019-02-03T17:51:44
| 168,971,497
| 0
| 0
| null | 2019-02-03T17:22:07
| 2019-02-03T17:22:06
| null |
UTF-8
|
Python
| false
| false
| 1,654
|
py
|
#!/usr/bin/env python
# setup script for nmrglue
from distutils.core import setup
from codecs import open
from os import path, walk
here = path.abspath(path.dirname(__file__))
# get long description from README
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='nmrglue',
version='0.7-dev', # change this in nmrglue/__init__.py also
description='A module for working with NMR data in Python',
long_description=long_description,
url='http://www.nmrglue.com',
author='Jonathan J. Helmus',
author_email='jjhelmus@gmail.com',
license='New BSD License',
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux'],
requires=['numpy', 'scipy'],
packages=[
'nmrglue',
'nmrglue.analysis',
'nmrglue.analysis.tests',
'nmrglue.fileio',
'nmrglue.fileio.tests',
'nmrglue.process',
'nmrglue.process.nmrtxt',
'nmrglue.util'],
package_data={'nmrglue': [
'fileio/tests/data/*.f*',
'fileio/tests/data/*.dir/*',
'fileio/tests/data/test.tab']},
)
|
[
"jjhelmus@gmail.com"
] |
jjhelmus@gmail.com
|
63ab12f6fb3b539ccfbf9e77397ab0cef69e7a12
|
cb2c9c33b993e14fec3db34cdbaf04dabdf60ad1
|
/2018/17/solve
|
4d8fb57693bb81b44e1058eb285023126ead0e59
|
[] |
no_license
|
rr-/aoc
|
51e95711d3eaf5de4b80bcd57c90750c1c09252d
|
babc68340eb46dac42981e700435bd740ff3c625
|
refs/heads/master
| 2020-04-10T00:26:08.388243
| 2018-12-25T15:08:06
| 2018-12-25T15:17:17
| 160,685,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,368
|
#!/usr/bin/env python3
import re
import typing as T
from collections import defaultdict
from pathlib import Path
from PIL import Image
WELL_X = 500
WELL_Y = 0
AIR = "."
CLAY = "#"
WELL = "+"
STILL_WATER = "~"
RUNNING_WATER = "|"
def parse_chunk(text: str) -> T.List[int]:
num = list(map(int, re.findall(r"\d+", text)))
if len(num) == 2:
return list(range(num[0], num[1] + 1))
elif len(num) == 1:
return [num[0]]
raise AssertionError
class World:
def __init__(self, text: str) -> None:
self.cells: T.Dict[T.Tuple[int, int], str] = defaultdict(lambda: AIR)
for line in text.split("\n"):
if not line:
continue
chunk_x, chunk_y = sorted(line.split())
xs = parse_chunk(chunk_x)
ys = parse_chunk(chunk_y)
for x in xs:
for y in ys:
self.cells[x, y] = CLAY
self.x1 = min(key[0] for key in self.cells.keys()) - 1
self.x2 = max(key[0] for key in self.cells.keys()) + 1
self.y1 = min(key[1] for key in self.cells.keys())
self.y2 = max(key[1] for key in self.cells.keys())
self.w = self.x2 + 1 - self.x1
self.h = self.y2 + 1 - self.y1
self.cells[WELL_X, WELL_Y] = WELL
self.heads: T.List[T.Tuple[int, int]] = [(WELL_X, WELL_Y)]
def turn(self) -> bool:
while self.heads:
x, y = self.heads.pop(0)
if self.cells[x, y] == WELL:
self.cells[x, y + 1] = RUNNING_WATER
self.heads.append((x, y + 1))
return True
if self.cells[x, y] != RUNNING_WATER:
continue
if self.cells[x, y + 1] == AIR:
self.cells[x, y + 1] = RUNNING_WATER
if y + 1 < self.y2:
self.heads.append((x, y + 1))
return True
if self.cells[x, y + 1] in {STILL_WATER, CLAY}:
ret = False
if self.cells[x - 1, y] == AIR:
self.cells[x - 1, y] = RUNNING_WATER
self.heads.append((x - 1, y))
ret = True
if self.cells[x + 1, y] == AIR:
self.cells[x + 1, y] = RUNNING_WATER
self.heads.append((x + 1, y))
ret = True
if ret:
return True
x1 = x2 = x
while self.cells[x1 - 1, y] == RUNNING_WATER:
x1 -= 1
while self.cells[x2 + 1, y] == RUNNING_WATER:
x2 += 1
if self.cells[x1 - 1, y] == CLAY and self.cells[x2 + 1, y] == CLAY:
for x in range(x1, x2 + 1):
self.cells[x, y] = STILL_WATER
if self.cells[x, y - 1] == RUNNING_WATER:
self.heads.append((x, y - 1))
return True
return False
def save_image(self, path: Path) -> None:
img = Image.new("RGB", (self.w, self.h), "black")
pixels = img.load()
colors: T.Dict[str, T.Tuple[int, int, int]] = {
AIR: (0, 0, 0),
RUNNING_WATER: (0, 0, 255),
STILL_WATER: (0, 0, 128),
WELL: (255, 255, 0),
CLAY: (256, 200, 0),
}
for x, y in self.cells.keys():
if self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2:
pixels[x - self.x1, y - self.y1] = colors[self.cells[x, y]]
img.save(str(path), format="png")
def part1(world: World) -> int:
total = 0
for pos, cell in world.cells.items():
x, y = pos
if y < world.y1 or y > world.y2:
continue
if cell in {STILL_WATER, RUNNING_WATER}:
total += 1
return total
def part2(world: World) -> int:
total = 0
for pos, cell in world.cells.items():
x, y = pos
if y < world.y1 or y > world.y2:
continue
if cell == STILL_WATER:
total += 1
return total
def main() -> None:
text = Path(__file__).with_name("input.txt").read_text()
world = World(text)
while True:
if not world.turn():
break
world.save_image(Path(__file__).with_name("image.png"))
print(part1(world))
print(part2(world))
if __name__ == "__main__":
main()
|
[
"rr-@sakuya.pl"
] |
rr-@sakuya.pl
|
|
540ec97d8fd8b38e391df681d5f04875976ab585
|
243335dfe75c72f4e94ff953f5b0851d2e116cb1
|
/model/simple_graphs.py
|
dd2a0ecdd9d8c05fc516c00c757c9509420d2282
|
[] |
no_license
|
bwhub/generalizable-device-placement
|
f485aea87b8a297cc3212014f3674fd9bad8df49
|
d9a81a9f6cb05bfc94773722a4e7ead793ca7fd1
|
refs/heads/master
| 2022-02-24T13:24:43.374572
| 2019-10-25T01:02:52
| 2019-10-25T01:02:52
| 298,305,058
| 1
| 0
| null | 2020-09-24T14:32:56
| 2020-09-24T14:32:55
| null |
UTF-8
|
Python
| false
| false
| 1,606
|
py
|
import networkx as nx
# d is the number of chains
def makeChainGraph(N, d=2):
G = nx.DiGraph()
def add_edge(i, j):
G.add_edge(str(i), str(j))
'''
for N = 4, d = 2
1 2 3 4
0 9
5 6 7 8
Lowest Runtime: (N+2) + l_fact* 2
'''
n = 1
for i in range(d):
add_edge(0, n)
for j in range(N-1):
add_edge(n, n+1)
n += 1
add_edge(n, N*d + 1)
n += 1
assert n == N*d + 1
cost = {}
out_size = {}
for i in G.nodes():
cost[i] = 1
out_size[i] = 1
nx.set_node_attributes(G, cost, 'cost')
nx.set_node_attributes(G, out_size, 'out_size')
G.d = d
return G
def makeEdgeGraph(N):
G = nx.DiGraph()
for i in range(N):
G.add_edge(2*i, 2*i + 1)
cost = {}
out_size = {}
for i in G.nodes():
cost[i] = 1
out_size[i] = 1
nx.set_node_attributes(G, cost, 'cost')
nx.set_node_attributes(G, out_size, 'out_size')
return G
def makeCrownGraph(N, d=2):
G = nx.DiGraph()
def add_edge(i, j):
G.add_edge(str(i), str(j))
'''
for N = 4, d = 2
8
/ /\ \
/ / \ \
/ / \ \
4 -> 5 -> 6 -> 7
^ ^ ^ ^
| | | |
0 -> 1 -> 2 -> 3
'''
for i in range(d):
for j in range(N):
n = N*i + j
if j != (N - 1):
add_edge(n, n + 1)
if i > 0:
add_edge(N* (i-1) + j, n)
if i == d - 1:
add_edge(n, N* d)
cost = {}
out_size = {}
for i in G.nodes():
cost[i] = 1
out_size[i] = .5
nx.set_node_attributes(G, cost, 'cost')
nx.set_node_attributes(G, out_size, 'out_size')
G.d = d
return G
|
[
"addanki@mit.edu"
] |
addanki@mit.edu
|
44db05656099ea323b7329dabe2deea43a7f61fe
|
29c71deb76575eb7142f5e798745ccda8dd5d366
|
/salesapp/cart.py
|
91c2657add7953b609ab17ee0029f8001f8cac73
|
[] |
no_license
|
bill0812/salesSystem
|
c0b992949183ce8df8cd6c3a1470b17a5f6dc33b
|
4ff17f52bac911959b7b7fff0c5e046d5471ed66
|
refs/heads/master
| 2020-03-29T10:04:57.736379
| 2020-03-01T12:04:10
| 2020-03-01T12:04:10
| 149,788,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
from firebase import firebase
import hashlib,re
url = "https://sales-system-project.firebaseio.com/"
fb = firebase.FirebaseApplication(url, None)
def fetch_cart(account):
customer_data = fb.get("/會員資料/"+account+"/購物車/客製化",None)
product_data = fb.get("/會員資料/"+account+"/購物車/產品資訊",None)
return customer_data,product_data
def upload_normal(name,count,account):
data = dict()
product_data = fb.get("/會員資料/"+account+"/購物車/產品資訊/"+name,None)
if product_data == None:
product_detail = fb.get("/產品資訊/"+name,None)
data = {
"數量" : count,
"總價格" : int(product_detail["價格"]) * int(count),
"產品資訊" : product_detail["種類"]
}
fb.put("/會員資料/"+account+"/購物車/產品資訊/", data = data , name = name)
else:
product_detail = fb.get("/產品資訊/"+name,None)
data = {
"數量" : int(product_data["數量"]) + int(count),
"總價格" : int(product_data["總價格"]) + int(product_detail["價格"]) * int(count),
"產品資訊" : product_detail["種類"]
}
fb.put("/會員資料/"+account+"/購物車/產品資訊/", data = data , name = name)
|
[
"maxwell111023@gmail.com"
] |
maxwell111023@gmail.com
|
1874a9fa74e68180d06cdde9266507f63280c99c
|
b167407960a3b69b16752590def1a62b297a4b0c
|
/tools/project-creator/Python2.6.6/Lib/test/test_aepack.py
|
46ec71b7ce3f537a8a7011abdb870791b1e84d7a
|
[
"MIT"
] |
permissive
|
xcode1986/nineck.ca
|
543d1be2066e88a7db3745b483f61daedf5f378a
|
637dfec24407d220bb745beacebea4a375bfd78f
|
refs/heads/master
| 2020-04-15T14:48:08.551821
| 2019-01-15T07:36:06
| 2019-01-15T07:36:06
| 164,768,581
| 1
| 1
|
MIT
| 2019-01-15T08:30:27
| 2019-01-09T02:09:21
|
C++
|
UTF-8
|
Python
| false
| false
| 2,617
|
py
|
# Copyright (C) 2003 Python Software Foundation
import unittest
import aepack
import aetypes
import os
from test import test_support
class TestAepack(unittest.TestCase):
OBJECTS = [
aetypes.Enum('enum'),
aetypes.Type('type'),
aetypes.Keyword('kwrd'),
aetypes.Range(1, 10),
aetypes.Comparison(1, '< ', 10),
aetypes.Logical('not ', 1),
aetypes.IntlText(0, 0, 'international text'),
aetypes.IntlWritingCode(0,0),
aetypes.QDPoint(50,100),
aetypes.QDRectangle(50,100,150,200),
aetypes.RGBColor(0x7000, 0x6000, 0x5000),
aetypes.Unknown('xxxx', 'unknown type data'),
aetypes.Character(1),
aetypes.Character(2, aetypes.Line(2)),
]
def test_roundtrip_string(self):
o = 'a string'
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(o, unpacked)
def test_roundtrip_int(self):
o = 12
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(o, unpacked)
def test_roundtrip_float(self):
o = 12.1
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(o, unpacked)
def test_roundtrip_None(self):
o = None
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(o, unpacked)
def test_roundtrip_aeobjects(self):
for o in self.OBJECTS:
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(repr(o), repr(unpacked))
def test_roundtrip_FSSpec(self):
try:
import Carbon.File
except:
return
if not hasattr(Carbon.File, "FSSpec"):
return
o = Carbon.File.FSSpec(os.curdir)
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(o.as_pathname(), unpacked.as_pathname())
def test_roundtrip_Alias(self):
try:
import Carbon.File
except:
return
if not hasattr(Carbon.File, "FSSpec"):
return
o = Carbon.File.FSSpec(os.curdir).NewAliasMinimal()
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
self.assertEqual(o.FSResolveAlias(None)[0].as_pathname(),
unpacked.FSResolveAlias(None)[0].as_pathname())
def test_main():
test_support.run_unittest(TestAepack)
if __name__ == '__main__':
test_main()
|
[
"278688386@qq.com"
] |
278688386@qq.com
|
ce32fd8f2071c627a7f0902be8dfa99ab9d61d03
|
bc441bb06b8948288f110af63feda4e798f30225
|
/topboard_sdk/model/ops_automation/jobs_pb2.py
|
9233835a9eb0089af4f4dc15c4c5b04c2d1ac53e
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 5,796
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: jobs.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from topboard_sdk.model.ops_automation import bind_resource_pb2 as topboard__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2
from topboard_sdk.model.ops_automation import mail_info_pb2 as topboard__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='jobs.proto',
package='ops_automation',
syntax='proto3',
serialized_options=_b('ZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automation'),
serialized_pb=_b('\n\njobs.proto\x12\x0eops_automation\x1a\x35topboard_sdk/model/ops_automation/bind_resource.proto\x1a\x31topboard_sdk/model/ops_automation/mail_info.proto\"\xc1\x01\n\x04Jobs\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x02 \x01(\t\x12\x0e\n\x06menuId\x18\x03 \x01(\t\x12\x32\n\x0c\x62indResource\x18\x04 \x01(\x0b\x32\x1c.ops_automation.BindResource\x12\x0c\n\x04\x64\x65sc\x18\x05 \x01(\t\x12\x13\n\x0b\x61llowModify\x18\x06 \x01(\x08\x12&\n\x04mail\x18\x07 \x01(\x0b\x32\x18.ops_automation.MailInfo\x12\n\n\x02id\x18\x08 \x01(\tBJZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automationb\x06proto3')
,
dependencies=[topboard__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2.DESCRIPTOR,])
_JOBS = _descriptor.Descriptor(
name='Jobs',
full_name='ops_automation.Jobs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='ops_automation.Jobs.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='category', full_name='ops_automation.Jobs.category', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='menuId', full_name='ops_automation.Jobs.menuId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bindResource', full_name='ops_automation.Jobs.bindResource', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desc', full_name='ops_automation.Jobs.desc', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allowModify', full_name='ops_automation.Jobs.allowModify', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mail', full_name='ops_automation.Jobs.mail', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='ops_automation.Jobs.id', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=137,
serialized_end=330,
)
_JOBS.fields_by_name['bindResource'].message_type = topboard__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2._BINDRESOURCE
_JOBS.fields_by_name['mail'].message_type = topboard__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2._MAILINFO
DESCRIPTOR.message_types_by_name['Jobs'] = _JOBS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Jobs = _reflection.GeneratedProtocolMessageType('Jobs', (_message.Message,), {
'DESCRIPTOR' : _JOBS,
'__module__' : 'jobs_pb2'
# @@protoc_insertion_point(class_scope:ops_automation.Jobs)
})
_sym_db.RegisterMessage(Jobs)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
3edc5c35853123a22c04959e95910dfd09412079
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/test/modules/test_flatten.py
|
cf2ada9bbbdcad6b4174bcacb1aeef712538c9af
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 4,076
|
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.automated_test_util import *
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _test_flatten(test_case, device):
m = flow.nn.Flatten()
x = flow.Tensor(32, 2, 5, 5, device=flow.device(device))
flow.nn.init.uniform_(x)
y = m(x)
test_case.assertTrue(y.shape == flow.Size((32, 50)))
test_case.assertTrue(np.array_equal(y.numpy().flatten(), x.numpy().flatten()))
y2 = flow.flatten(x, start_dim=2)
test_case.assertTrue(y2.shape == flow.Size((32, 2, 25)))
test_case.assertTrue(np.array_equal(y2.numpy().flatten(), x.numpy().flatten()))
y3 = x.flatten(start_dim=1)
test_case.assertTrue(y3.shape == flow.Size((32, 50)))
test_case.assertTrue(np.array_equal(y3.numpy().flatten(), x.numpy().flatten()))
y4 = x.flatten(start_dim=1, end_dim=2)
test_case.assertTrue(y4.shape == flow.Size((32, 10, 5)))
test_case.assertTrue(np.array_equal(y4.numpy().flatten(), x.numpy().flatten()))
y5 = flow.flatten(x)
test_case.assertTrue(y5.shape == flow.Size((1600,)))
test_case.assertTrue(np.array_equal(y5.numpy().flatten(), x.numpy().flatten()))
def _test_flatten_backward(test_case, device):
m = flow.nn.Flatten().to(flow.device(device))
x = flow.Tensor(2, 3, 4, 5, device=flow.device(device))
x.requires_grad = True
flow.nn.init.uniform_(x)
y = m(x)
z = y.sum()
z.backward()
test_case.assertTrue(np.array_equal(np.ones(shape=(2, 3, 4, 5)), x.grad.numpy()))
@flow.unittest.skip_unless_1n1d()
class TestFlattenModule(flow.unittest.TestCase):
def test_cast(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [_test_flatten, _test_flatten_backward]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest(n=5)
def test_flatten_module_with_random_data(test_case):
m = torch.nn.Flatten(
start_dim=random(1, 6) | nothing(), end_dim=random(1, 6) | nothing()
)
m.train(random())
device = random_device()
m.to(device)
x = random_tensor().to(device)
y = m(x)
return y
@autotest(n=5)
def test_flatten_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
y = torch.flatten(
x,
start_dim=random(1, 6).to(int) | nothing(),
end_dim=random(1, 6).to(int) | nothing(),
)
return y
@autotest(n=5, auto_backward=False, check_graph=True)
def test_flatten_bool_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device=device, dtype=torch.bool)
y = torch.flatten(
x,
start_dim=random(1, 6).to(int) | nothing(),
end_dim=random(1, 6).to(int) | nothing(),
)
return y
@autotest(n=5)
def test_flatten_with_0dim_data(test_case):
device = random_device()
x = random_tensor(ndim=0).to(device)
y = torch.flatten(
x,
start_dim=random(1, 6).to(int) | nothing(),
end_dim=random(1, 6).to(int) | nothing(),
)
return y
@profile(torch.flatten)
def profile_flatten(test_case):
torch.flatten(torch.ones(1000, 1000))
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
Oneflow-Inc.noreply@github.com
|
14ba40653e5a91516f606e964c3cc7999beb2bd4
|
2ee7195d71993838829e06f26347f76a2433931b
|
/test_backtest/T0backtest.py
|
6b49876c013292907cee777def029de94403fe24
|
[
"MIT"
] |
permissive
|
imgreenbird/QUANTAXIS
|
0a056de2c3961f5d0b7d0e17782f34b25593e5fb
|
88eac434135a92cd64bd035cd844b34020729747
|
refs/heads/master
| 2020-03-23T14:27:23.003742
| 2018-07-20T01:25:27
| 2018-07-20T01:25:27
| 141,676,903
| 3
| 0
|
MIT
| 2018-07-20T07:11:09
| 2018-07-20T07:11:09
| null |
UTF-8
|
Python
| false
| false
| 3,821
|
py
|
# coding: utf-8
# In[1]:
from QUANTAXIS.QAARP.QAStrategy import QA_Strategy
from QUANTAXIS.QAARP.QAAccount import QA_Account
from QUANTAXIS.QAUtil.QAParameter import (AMOUNT_MODEL, MARKET_TYPE,
FREQUENCE, ORDER_DIRECTION,
ORDER_MODEL,RUNNING_ENVIRONMENT)
import random
# In[2]:
class MAMINT0Strategy(QA_Account):
def __init__(self,init_hold={'000001':10000}):
super().__init__(init_hold=init_hold)
self.account_cookie = 'T0BACKTEST'
self.running_environment=RUNNING_ENVIRONMENT.TZERO
self.frequence = FREQUENCE.FIFTEEN_MIN
self.market_type = MARKET_TYPE.STOCK_CN
def on_bar(self, event):
try:
for item in event.market_data.code:
print('================')
print(self.sell_available)
print('================')
print(self.hold_available)
if self.sell_available.get(item, 0) > 0:
event.send_order(account_id=self.account_cookie,
amount=self.sell_available[item], amount_model=AMOUNT_MODEL.BY_AMOUNT,
time=self.current_time, code=item, price=0,
order_model=ORDER_MODEL.MARKET, towards=ORDER_DIRECTION.SELL,
market_type=self.market_type, frequence=self.frequence,
broker_name=self.broker
)
else:
event.send_order(account_id=self.account_cookie,
amount=100, amount_model=AMOUNT_MODEL.BY_AMOUNT,
time=self.current_time, code=item, price=0,
order_model=ORDER_MODEL.MARKET, towards=ORDER_DIRECTION.BUY,
market_type=self.market_type, frequence=self.frequence,
broker_name=self.broker)
except:
pass
# In[3]:
from QUANTAXIS.QAARP.QARisk import QA_Risk
from QUANTAXIS.QAARP.QAUser import QA_User
from QUANTAXIS.QABacktest.QABacktest import QA_Backtest
from QUANTAXIS.QAUtil.QALogs import QA_util_log_info
from QUANTAXIS.QAUtil.QAParameter import FREQUENCE, MARKET_TYPE
class Backtest(QA_Backtest):
'''
多线程模式回测示例
'''
def __init__(self, market_type, frequence, start, end, code_list, commission_fee):
super().__init__(market_type, frequence, start, end, code_list, commission_fee)
self.user = QA_User()
t0strategy=MAMINT0Strategy()
# maminstrategy.reset_assets(1000)
# self.portfolio, self.account = self.user.register_account(mastrategy)
self.user = QA_User(user_cookie='user_admin')
self.portfolio = self.user.new_portfolio('folio_admin')
self.portfolio, self.account = self.user.register_account(t0strategy)
def after_success(self):
QA_util_log_info(self.account.history_table)
risk = QA_Risk(self.account, benchmark_code='000300',
benchmark_type=MARKET_TYPE.INDEX_CN)
print(risk().T)
self.account.save()
risk.save()
risk.plot_assets_curve()
print(risk.profit_construct)
# In[4]:
import QUANTAXIS as QA
backtest = Backtest(market_type=MARKET_TYPE.STOCK_CN,
frequence=FREQUENCE.FIFTEEN_MIN,
start='2017-11-01',
end='2017-12-10',
code_list=['000001'],
commission_fee=0.00015)
backtest.start_market()
backtest.run()
backtest.stop()
# In[5]:
backtest.account.history_table
|
[
"yutiansut@qq.com"
] |
yutiansut@qq.com
|
20a9b35189e8cc10558e34e4a5f9b23e8b6e8215
|
c733e6b433914a8faba256c7853f5cf2cd39c62a
|
/Python/Leetcode Daily Practice/Stack/907.Sum of Subarray Minimums.py
|
27776a90d32a21b05ff11eaa3a7e22e5d544f69d
|
[] |
no_license
|
YaqianQi/Algorithm-and-Data-Structure
|
3016bebcc1f1356b6e5f3c3e588f3d46c276a805
|
2e1751263f484709102f7f2caf18776a004c8230
|
refs/heads/master
| 2021-10-27T16:29:18.409235
| 2021-10-14T13:57:36
| 2021-10-14T13:57:36
| 178,946,803
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,337
|
py
|
class Solution(object):
    def sumSubarrayMins(self, A):
        """Return the sum of min(b) over every contiguous subarray b of A,
        modulo 10**9 + 7 (LeetCode 907).

        Monotonic-stack algorithm, O(n) time / O(n) space:
        the stack holds (value, count) pairs with non-decreasing values,
        where `value` is the minimum of `count` subarrays ending at the
        current position; `dot` is the sum of minimums of all subarrays
        ending here, and accumulating `dot` over positions gives the answer.
        """
        MOD = 10 ** 9 + 7
        stack = []   # (value, count) pairs, values non-decreasing bottom-to-top
        dot = 0      # sum of minimums of all subarrays ending at current index
        ans = 0
        for num in A:
            cnt = 1  # the subarray consisting of `num` alone
            # Pop entries with minimum >= num: `num` becomes the minimum
            # of every subarray they represented.
            while stack and stack[-1][0] >= num:
                n, c = stack.pop()
                cnt += c
                dot -= n * c
            stack.append((num, cnt))
            dot += num * cnt
            ans += dot
        return ans % MOD
if __name__=="__main__":
    # Worked example: each row below lists the minimum of subarrays starting
    # at that element; the row sums total 73.
    A = [1,7,5,2,4,3,9]
    # - - - -
    # 1 1 1 1 1 1 1 : 7
    # 7 5 2 2 2 2 : 20
    # 5 2 2 2 2 : 13
    # 2 2 2 2 : 8
    # 4 3 3 : 10
    # 3 3 : 6
    # 9 : 9
    # 73
    # print(sum(B))
    sol = Solution()
    print(sol.sumSubarrayMins(A))
# Second worked example (LeetCode 907 sample); call is commented out.
# Input:
A = [3,1,2,4]
# 3 1 1 1
# 1 1 1
# 2 2
# 4
# Output: 17
# Explanation: Subarrays are [3], [1], [2], [4], [3,1], [1,2], [2,4], [3,1,2], [1,2,4], [3,1,2,4].
# Minimums are 3, 1, 2, 4, 1, 1, 2, 1, 1, 1. Sum is 17.
sol = Solution()
# print(sol.sumSubarrayMins(A))
|
[
"alicia.qyq@gmail.com"
] |
alicia.qyq@gmail.com
|
5d54aa954b5a801f72d85eef577ed6856517acbf
|
cccd1ede83f9391238893f3862e7beff999647e1
|
/rw_and_plot/15_1_cubes.py
|
efe10175eca18c8f2ef07cef6f815f60667a78fa
|
[] |
no_license
|
SMS-NED16/pcc-data-vis
|
c7c136e32921619af52b46cdbf12f6debaa8a690
|
f9750ee947163335c351a6df453f5d2dab87d855
|
refs/heads/master
| 2020-03-09T02:32:19.494589
| 2018-04-04T12:11:40
| 2018-04-04T12:11:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
import matplotlib.pyplot as plt

# Side-by-side scatter plots: cubes of 1..5 (left) and 1..5000 (right).
small_values = list(range(1, 6))
small_cubes = [v ** 3 for v in small_values]
large_cubes = [v ** 3 for v in range(1, 5001)]

# Left panel: the first five cubes.
plt.subplot(1, 2, 1)
plt.scatter(small_values, small_cubes, s=40)
plt.title("Graph of first five cubes", fontsize=14)
plt.xlabel("Values", fontsize=12)
plt.ylabel("Cube of Value", fontsize=12)
plt.tick_params(labelsize=12, axis='both', which='major')

# Right panel: 5000 cubes, colour-mapped by magnitude.
plt.subplot(1, 2, 2)
large_values = list(range(1, 5001))
plt.scatter(large_values, large_cubes,
            c=large_cubes, cmap=plt.cm.Blues, edgecolor='none')
plt.title("Graph of five thousand cubes", fontsize=14)
plt.xlabel("Values", fontsize=12)
plt.ylabel("Cube of Values", fontsize=12)
plt.tick_params(labelsize=12, axis='both', which='major')

plt.show()
|
[
"saadmsiddiqui96@gmail.com"
] |
saadmsiddiqui96@gmail.com
|
824d5eea34380df61a582e52a5a070cac3dff314
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=3.5_rd=0.8_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=48/sched.py
|
afb4c0412030f6f941ad2606c1757d3218fe3c14
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
-X FMLP -Q 0 -L 1 132 400
-X FMLP -Q 0 -L 1 125 400
-X FMLP -Q 0 -L 1 65 250
-X FMLP -Q 0 -L 1 48 175
-X FMLP -Q 1 -L 1 45 250
-X FMLP -Q 1 -L 1 40 125
-X FMLP -Q 1 -L 1 36 250
-X FMLP -Q 1 -L 1 34 150
-X FMLP -Q 2 -L 1 26 175
-X FMLP -Q 2 -L 1 25 125
-X FMLP -Q 2 -L 1 22 150
-X FMLP -Q 2 -L 1 22 100
-X FMLP -Q 3 -L 1 20 200
-X FMLP -Q 3 -L 1 19 150
-X FMLP -Q 3 -L 1 18 150
16 150
16 100
13 300
10 125
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
7d2398a97349b50e741469065939c3bdc7116573
|
5d45174eef86562b6c90a4bc07c86258df249486
|
/hyak/launcher.py
|
7b06e8c0e6a491706816db94253c46ac6fdff997
|
[] |
no_license
|
bmorris3/shampoo
|
a70dd3b1896c7a4f2e88413c13cae96d80f21c71
|
853c6668efef3e7b69727ea45ff9eff419e9a70b
|
refs/heads/master
| 2023-05-27T04:30:02.756962
| 2018-03-28T14:38:09
| 2018-03-28T14:38:09
| 41,105,695
| 22
| 12
| null | 2021-04-27T11:06:07
| 2015-08-20T16:08:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,342
|
py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
from glob import glob
import numpy as np
# Cluster launcher: split hologram files into groups, write a command list and
# a PBS submit script per group, and submit each with qsub.
python_path = '/usr/lusers/bmmorris/miniconda2/bin/python'
data_dir = '/gscratch/stf/bmmorris/shamu/2015.12.15_17-47'
output_dir = '/gscratch/stf/bmmorris/shamu/outputs'
python_script = '/usr/lusers/bmmorris/git/shampoo/hyak/hyak_jobs.py'
raw_hologram_paths = sorted(glob(os.path.join(data_dir, '*_holo.tif')))
# Template read relative to the CWD — must be run from this script's directory.
submit_template = open('submit_template.sh', 'r').read()
walltime = '01:00:00'
email = 'bmmorris@uw.edu'
# Divide holograms: n_jobs_per_node * n_repeats_per_node holograms per group.
n_jobs_per_node = 16
n_repeats_per_node = 2
all_hologram_indices = np.arange(len(raw_hologram_paths))
hologram_index_groups = np.array_split(all_hologram_indices,
                                       len(all_hologram_indices) //
                                       (n_jobs_per_node*n_repeats_per_node) + 1)
for i, split_hologram_indices in enumerate(hologram_index_groups):
    hologram_paths = [raw_hologram_paths[j] for j in split_hologram_indices]
    # Create input jobs to pipe to "parallel" command:
    command_list_path = os.path.join(output_dir,
                                     'command_list_{0:02d}.txt'.format(i))
    with open(command_list_path, 'w') as command_file:
        for holo_path in hologram_paths:
            line = "{0} {1} {2} {3}\n".format(python_path, python_script,
                                              holo_path, output_dir)
            command_file.write(line)
    submit_script_name = os.path.join(output_dir,
                                      'submit_script_{0:02d}.sh'.format(i))
    # Fill the shell template; submit_template.sh must define these {fields}.
    submit_script = submit_template.format(job_name="shampoo_test",
                                           run_dir=output_dir,
                                           log_dir=output_dir,
                                           walltime=walltime,
                                           email=email,
                                           command_list_path=command_list_path,
                                           n_jobs_per_node=n_jobs_per_node)
    submit_script_path = os.path.join(output_dir, submit_script_name)
    with open(submit_script_path, 'w') as f:
        f.write(submit_script)
    # Submit to the PBS queue.
    os.system('qsub {0}'.format(submit_script_path))
|
[
"brettmorris21@gmail.com"
] |
brettmorris21@gmail.com
|
e06d7832c60e9bb8324a477b100c0e87fb4cfe26
|
c3274a346ddcf09c9ec70e1402daa34ad0ac44af
|
/examples/dataframe/dataframe_sum.py
|
f8171d3e92e95fabd061003fe34051a41b89a867
|
[
"BSD-2-Clause"
] |
permissive
|
vishalbelsare/hpat
|
cb6b39ddeb07c319c88e132df9cee4c6adb0a415
|
eb5efbad9bfec67db88b52474c4bd00238b61283
|
refs/heads/master
| 2023-04-06T14:59:35.723023
| 2023-03-10T16:44:51
| 2023-03-10T16:44:51
| 161,796,133
| 0
| 0
|
BSD-2-Clause
| 2023-03-19T09:18:25
| 2018-12-14T14:35:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,882
|
py
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
Expected result:
A 1.0
B 10.0
C inf
dtype: float64
"""
import pandas as pd
import numpy as np
from numba import njit
@njit
def dataframe_sum():
    """Build a small demo DataFrame and return its column-wise sum
    (compiled with numba; demonstrates NaN/inf handling — see the
    expected-result docstring at the top of this file)."""
    df = pd.DataFrame({"A": [.2, .0, .6, .2],
                       "B": [2, 0, 6, 2],
                       "C": [-1, np.nan, 1, np.inf]})
    return df.sum()
print(dataframe_sum())
|
[
"noreply@github.com"
] |
vishalbelsare.noreply@github.com
|
2d221ec37b42ee7d6d78140f67e53a4798e29806
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/appconfiguration/v20200701preview/list_configuration_store_keys.py
|
1c5a925503a75002b4536769dfcd2cc6de923821
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 3,284
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListConfigurationStoreKeysResult',
'AwaitableListConfigurationStoreKeysResult',
'list_configuration_store_keys',
]
# NOTE: generated by the Pulumi SDK Generator — prefer regenerating over hand-edits.
@pulumi.output_type
class ListConfigurationStoreKeysResult:
    """
    The result of a request to list API keys.
    """
    def __init__(__self__, next_link=None, value=None):
        # Falsy values skip the type check by design of the generator.
        if next_link and not isinstance(next_link, str):
            raise TypeError("Expected argument 'next_link' to be a str")
        pulumi.set(__self__, "next_link", next_link)
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter(name="nextLink")
    def next_link(self) -> Optional[str]:
        """
        The URI that can be used to request the next set of paged results.
        """
        return pulumi.get(self, "next_link")
    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.ApiKeyResponseResult']]:
        """
        The collection value.
        """
        return pulumi.get(self, "value")
class AwaitableListConfigurationStoreKeysResult(ListConfigurationStoreKeysResult):
    """Awaitable wrapper: __await__ is a generator that yields nothing and
    immediately returns a plain result, so the value works with `await`."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListConfigurationStoreKeysResult(
            next_link=self.next_link,
            value=self.value)
def list_configuration_store_keys(config_store_name: Optional[str] = None,
                                  resource_group_name: Optional[str] = None,
                                  skip_token: Optional[str] = None,
                                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListConfigurationStoreKeysResult:
    """
    List the API keys of an Azure App Configuration store (one page of results).

    :param str config_store_name: The name of the configuration store.
    :param str resource_group_name: The name of the resource group to which the container registry belongs.
    :param str skip_token: A skip token is used to continue retrieving items after an operation returns a partial result. If a previous response contains a nextLink element, the value of the nextLink element will include a skipToken parameter that specifies a starting point to use for subsequent calls.
    """
    __args__ = dict()
    __args__['configStoreName'] = config_store_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['skipToken'] = skip_token
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the Azure NextGen provider token.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:appconfiguration/v20200701preview:listConfigurationStoreKeys', __args__, opts=opts, typ=ListConfigurationStoreKeysResult).value
    return AwaitableListConfigurationStoreKeysResult(
        next_link=__ret__.next_link,
        value=__ret__.value)
|
[
"noreply@github.com"
] |
test-wiz-sec.noreply@github.com
|
670ee8f2f3d080e83ece3e6e319c7b2d5c3ec218
|
d8dfe2bb29965f2bf00724caaa4d5f3f02715002
|
/crater/operations/expand_dims.py
|
c5c91efc7cd1005456f0b37dce2f1d1912a19e56
|
[] |
no_license
|
malyvsen/kth-deep-learning
|
20fc0d89c0b81ea97af77b627f0ee46458310126
|
17b3140043aaa81cf86a6a9b7fed3295ee48b061
|
refs/heads/main
| 2023-05-05T10:02:29.764591
| 2021-05-13T08:35:25
| 2021-05-13T08:35:25
| 353,112,929
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
from typing import Union, Tuple
import numpy as np
from crater.tensor import Tensor
from crater.gradient import Gradients, Gradient
from crater.utils import tuplify
from .coalesce import coalesce
def expand_dims(tensor: Tensor, axes: Union[None, int, Tuple[int]] = None):
    """Insert size-1 dimensions at *axes*; the backward pass squeezes them
    back out so the gradient matches the source tensor's shape."""
    source = coalesce(tensor)
    normalized_axes = tuplify(axes) if axes is not None else ()

    def _backward(gradient):
        # Remove the inserted dimensions before accumulating onto the source.
        return Gradients.accumulate(
            Gradient(tensor=source, gradient=np.squeeze(gradient, normalized_axes))
        )

    expanded = np.expand_dims(source.data, normalized_axes)
    return Tensor.from_numpy(data=expanded, backward=_backward)


Tensor.expand_dims = expand_dims
|
[
"5940672+malyvsen@users.noreply.github.com"
] |
5940672+malyvsen@users.noreply.github.com
|
104f75c625a5c419f721c085bd4d90f8ac2b482c
|
b7f3edb5b7c62174bed808079c3b21fb9ea51d52
|
/third_party/blink/tools/blinkpy/w3c/monorail_unittest.py
|
14a22ac6fb4b1feb36e0116e4dc30575cf05e1d7
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause"
] |
permissive
|
otcshare/chromium-src
|
26a7372773b53b236784c51677c566dc0ad839e4
|
64bee65c921db7e78e25d08f1e98da2668b57be5
|
refs/heads/webml
| 2023-03-21T03:20:15.377034
| 2020-11-16T01:40:14
| 2020-11-16T01:40:14
| 209,262,645
| 18
| 21
|
BSD-3-Clause
| 2023-03-23T06:20:07
| 2019-09-18T08:52:07
| null |
UTF-8
|
Python
| false
| false
| 4,252
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from blinkpy.w3c.monorail import MonorailAPI, MonorailIssue
class MonorailIssueTest(unittest.TestCase):
    """Unit tests for MonorailIssue construction, rendering and validation.

    NOTE(review): test_unicode uses the `unicode` builtin, so this file is
    Python-2-only as written.
    """
    def test_init_succeeds(self):
        # Minimum example.
        MonorailIssue('chromium', summary='test', status='Untriaged')
        # All fields.
        MonorailIssue(
            'chromium',
            summary='test',
            status='Untriaged',
            description='body',
            cc=['foo@chromium.org'],
            labels=['Flaky'],
            components=['Infra'])
    def test_init_fills_project_id(self):
        issue = MonorailIssue('chromium', summary='test', status='Untriaged')
        self.assertEqual(issue.body['projectId'], 'chromium')
    def test_unicode(self):
        # Non-ASCII description exercises the unicode rendering path.
        issue = MonorailIssue(
            'chromium',
            summary=u'test',
            status='Untriaged',
            description=u'ABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ',
            cc=['foo@chromium.org', 'bar@chromium.org'],
            labels=['Flaky'],
            components=['Infra'])
        self.assertEqual(type(unicode(issue)), unicode)
        self.assertEqual(
            unicode(issue),
            (u'Monorail issue in project chromium\n'
             u'Summary: test\n'
             u'Status: Untriaged\n'
             u'CC: foo@chromium.org, bar@chromium.org\n'
             u'Components: Infra\n'
             u'Labels: Flaky\n'
             u'Description:\nABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ\n'))
    def test_init_unknown_fields(self):
        with self.assertRaises(AssertionError):
            MonorailIssue('chromium', component='foo')
    def test_init_missing_required_fields(self):
        # project_id, summary and status are all mandatory.
        with self.assertRaises(AssertionError):
            MonorailIssue('', summary='test', status='Untriaged')
        with self.assertRaises(AssertionError):
            MonorailIssue('chromium', summary='', status='Untriaged')
        with self.assertRaises(AssertionError):
            MonorailIssue('chromium', summary='test', status='')
    def test_init_unknown_status(self):
        with self.assertRaises(AssertionError):
            MonorailIssue('chromium', summary='test', status='unknown')
    def test_init_string_passed_for_list_fields(self):
        # cc/components/labels must be lists, not bare strings.
        with self.assertRaises(AssertionError):
            MonorailIssue(
                'chromium',
                summary='test',
                status='Untriaged',
                cc='foo@chromium.org')
        with self.assertRaises(AssertionError):
            MonorailIssue(
                'chromium',
                summary='test',
                status='Untriaged',
                components='Infra')
        with self.assertRaises(AssertionError):
            MonorailIssue(
                'chromium', summary='test', status='Untriaged', labels='Flaky')
    def test_new_chromium_issue(self):
        issue = MonorailIssue.new_chromium_issue(
            'test',
            description='body',
            cc=['foo@chromium.org'],
            components=['Infra'])
        self.assertEqual(issue.project_id, 'chromium')
        self.assertEqual(issue.body['summary'], 'test')
        self.assertEqual(issue.body['description'], 'body')
        self.assertEqual(issue.body['cc'], ['foo@chromium.org'])
        self.assertEqual(issue.body['components'], ['Infra'])
    def test_crbug_link(self):
        self.assertEqual(
            MonorailIssue.crbug_link(12345), 'https://crbug.com/12345')
class MonorailAPITest(unittest.TestCase):
    """Tests for MonorailAPI helpers that reshape request bodies."""
    def test_fix_cc_field_in_body(self):
        # The API expects cc entries as {'name': email} dicts, not bare strings.
        original_body = {
            'summary': 'test bug',
            'cc': ['foo@chromium.org', 'bar@chromium.org']
        }
        # pylint: disable=protected-access
        self.assertEqual(
            MonorailAPI._fix_cc_in_body(original_body), {
                'summary': 'test bug',
                'cc': [{
                    'name': 'foo@chromium.org'
                }, {
                    'name': 'bar@chromium.org'
                }]
            })
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
420a38deaafbfe305203da3b4483510a880f60ab
|
98bd2625dbcc955deb007a07129cce8b9edb3c79
|
/simulate_barseq_tnseq.py
|
85aaae7b4ea0ea4073de7d774c6a359a4df634fe
|
[] |
no_license
|
melanieabrams/bremdata
|
70d0a374ab5dff32f6d9bbe0a3959a617a90ffa8
|
df7a12c72a29cca4760333445fafe55bb6e40247
|
refs/heads/master
| 2021-12-26T01:57:25.684288
| 2021-09-30T22:48:05
| 2021-09-30T22:48:05
| 166,273,567
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,755
|
py
|
import regex
import numpy as np
import sys
import subprocess as sp
import random
# HELP #
# Print usage and bail out when no arguments are given.
if len(sys.argv) == 1:
    print("USAGE: python3 simulate_barseq_tnseq out_directory fastq_file1 fastq_file2...")
    exit()
# INPUT #
num_orig = 20 #number of unmodified reads to preserve. This will make sure my modified version for barseq of map-and-blat can still filter out reads w/o Tn.
num_new = 1000 # number of new reads
num_duplicate = 100 #number of new reads with duplicate barcodes
bc_length = 20 # number of nt of bc -- NOTE(review): unused; generate_bc has its own default of 20
genome_nt = 50 #number of nt of genome in simulated read
# BEGIN FUNCTIONS #
def generate_bc(length=20):
    """Return a random DNA barcode (characters A/T/G/C) of the given length."""
    bases = 'ATGC'
    return ''.join(random.choice(bases) for _ in range(length))
def generate_read(genome_seq,barcode ='random'):
    """Assemble a simulated BarSeq read: Illumina adaptor/primer sequence,
    a 20-nt barcode (random unless supplied), flanking sequence, the genomic
    chunk, and the trailing adaptor."""
    upstream = 'AATGATACGGCGACCACCGAGATCTACACTCTTTCCCTACACGACGCTCTTCCGATCTNNNNNNAGTATGTAACCCTGATGTCCACGAGGTCTCT'
    downstream = 'CGTACGCTGCAGGTCGACAACGTAAAACACATGCGTCAATTTTACGCATGATTATCTTTAACGTACGTCACAATATGATTATCTTTCTAGGGTTAA'
    tail = 'AGATCGGAAGAGCACACGTCTGAACTCCAGTCACATCACGATCTCGTATGCCGTCTTC'
    if barcode == 'random':
        barcode = generate_bc()
    return ''.join([upstream, barcode, downstream, genome_seq, tail])
def AddN20(fastq_file): ## add a random N20 plus primers to reads with transposon, so that normal Tn-Seq data looks like it was made with a barcode
    # Relies on module globals: out_dir, fastq_filename, num_orig, num_new,
    # num_duplicate, genome_nt, and generate_read().
    wf = open(out_dir+fastq_filename+'_simulatedn20','w') # outfile for the fake-barcoded reads that will be mapped
    line_count = 0
    tn_count = 0  # NOTE(review): never incremented or used
    with open(fastq_file) as f:
        # Take exactly num_orig + num_new 4-line FASTQ records from the top.
        head = [next(f) for x in range(4*(num_orig+num_new))]
        for line in head:
            line_count +=1
            if line_count % 4 == 1:
                header = line
            elif line_count % 4 == 2:
                read = line.strip()
            elif line_count % 4 == 0:
                # End of a record: emit either the untouched read (first
                # num_orig records) or a simulated barcoded read.
                nt_from_read =read[75:75+genome_nt]
                if line_count >4*num_orig:
                    if line_count>4*(num_new-num_duplicate):
                        read = generate_read(nt_from_read,barcode='random')
                    else:
                        # Fixed barcode to create deliberate duplicates.
                        read = generate_read(nt_from_read,barcode='TATTGGAAAACTATAGGGAC')
                # Output is FASTA-style (">"-prefixed header), one line per read.
                wf.writelines(">simulatedBarSeq"+header)
                wf.writelines(read+"\n")
#### START PROGRAM ####
# argv[1] = output directory, argv[2:] = input FASTQ files.
out_dir = sys.argv[1]
read_files = sys.argv[2:]
for read_file in read_files:
    # fastq_filename is a module global consumed by AddN20 for output naming.
    fastq_filename = read_file.split("/")[-1]
    AddN20(read_file)
|
[
"noreply@github.com"
] |
melanieabrams.noreply@github.com
|
da29fd239650f5e1ed7b3fbb80213b271705d874
|
edde333afca3ca4977bec7b38271d8c9e8448d85
|
/mirage/projectstartup/django_app_create.py
|
377edc7000d92c5c2d4e56add53610298c23d128
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
TrendingTechnology/mirage-django-lts
|
0219450155d9ce122196b66045de2bee13fa6bfd
|
b9d74006c1b64f5f5b33049b5a1701de58b478b3
|
refs/heads/master
| 2023-05-29T20:25:45.865865
| 2021-06-16T01:20:23
| 2021-06-16T01:20:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,119
|
py
|
# -*- coding: utf-8 -*-
"""
Copyright 2017-2020 Shota Shimazu.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from mirage import proj
from mirage.flow import Workflow
from mirage import system as mys
from mirage.template import readme_md, gitignore
from mirage.miragefile import source
class StartupWorkFlow(Workflow):
    """Interactive workflow that scaffolds a new Django project:
    prompts for metadata, runs django-admin startproject, then generates
    .gitignore, README.md and a Miragefile inside the new project and wires
    up the git remote."""
    def constructor(self):
        self._js_runtime = self._option
    def main(self):
        # Check that Django is importable before doing anything.
        # NOTE(review): bare except silently aborts the workflow.
        try:
            self._check_before()
        except:
            return
        # Input information
        mys.log("Please type your new Django application information.")
        # Check namespace
        # NOTE(review): bare except — if mys.log itself fails here,
        # self._project_name is unset and the error message below would raise.
        try:
            self._project_name = mys.log("Project name", withInput = True)
            self._check_namesapce(self._project_name)
        except:
            mys.log("Project \"{0}\" is already exists.".format(self._project_name), withError = True,
                            errorDetail = "Please remove duplication of Django project namespace.")
            return
        version = mys.log("App version", withInput = True, default = "0.0.1")
        author = mys.log("Author name", withInput = True)
        email = mys.log("Email", withInput = True)
        git_url = mys.log("Git URL", withInput = True)
        license_name = mys.log("License", withInput = True)
        description = mys.log("Description", withInput = True)
        copyrightor = mys.log("Copyrightor", withInput = True, default = author)
        self._create_new_django_app()
        # Create logging instance
        logger = mys.Progress()
        with proj.InDir("./" + self._project_name):
            # Generate .gitignore
            #log("Generating gitignore...")
            logger.write("Generating gitignore...", withLazy = True)
            self._create_template_git_project()
            # Generate README.md
            logger.update("Generating readme...", withLazy = True)
            self._create_docs(description)
            # Generate Miragefile
            logger.update("Generating Miragefile...", withLazy = True)
            self._create_miragefile(version, author, email, git_url, license_name, description, copyrightor)
            # Add remote repo
            logger.update("Adding remote repository...", withLazy = True)
            mys.command("git remote add origin " + git_url)
            # Completed
            logger.update("Completed!")
    def _create_new_django_app(self):
        # Shell out to django-admin to create the project skeleton.
        mys.command("django-admin startproject " + self._project_name)
    def _create_miragefile(self, version, author, email, git_url, license_name, description, copyrightors):
        with open("Miragefile", "w") as f:
            f.write(source.create(self._project_name, version, author, email, git_url, license_name, description, copyrightors))
    def _create_template_git_project(self):
        # Write .gitignore and initialise the repository.
        ignorance = gitignore.src()
        with open(".gitignore", "w") as f:
            f.write(ignorance)
        mys.command("git init")
    def _create_docs(self, description):
        with open("README.md", "a") as readme:
            readme.write(readme_md.src(self._project_name, description))
    def _check_before(self):
        # Raises ImportError when Django is not installed.
        try:
            import django
        except ImportError:
            mys.log("Failed to import Django!", withError = True,
                errorDetail = "You have to install Django before creating a new Django project.")
            raise ImportError
    def _check_namesapce(self, name):
        # NOTE(review): method name misspells "namespace"; renaming would
        # change the interface, so it is kept as-is.
        if os.path.exists(name):
            raise FileExistsError
|
[
"hornet.live.mf@gmail.com"
] |
hornet.live.mf@gmail.com
|
4dedc840e56c94ed1dd1857f53ca4926ff01e49f
|
8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a
|
/zelle_graphics/hello_world.py
|
7b041997c7b74d4bae882c7bb8f6ac20efcc7645
|
[
"CC-BY-4.0"
] |
permissive
|
stepik/SimplePyScripts
|
01092eb1b2c1c33756427abb2debbd0c0abf533f
|
3259d88cb58b650549080d6f63b15910ae7e4779
|
refs/heads/master
| 2023-05-15T17:35:55.743164
| 2021-06-11T22:59:07
| 2021-06-11T22:59:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Minimal demo of Zelle's graphics.py: draw a circle in a 100x100 window,
# wait for a mouse click, then close the window.
from graphics import *
win = GraphWin("My Circle", 100, 100)
c = Circle(Point(50, 50), 10)
c.draw(win)
win.getMouse() # Pause to view result
win.close()
|
[
"ilya.petrash@inbox.ru"
] |
ilya.petrash@inbox.ru
|
182d05ded57370c6cfa6cbc2097c846975a841d1
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/web/v20200901/list_web_app_function_secrets_slot.py
|
d97937e5b7161052acdff721dd2dc7821a150932
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 2,925
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListWebAppFunctionSecretsSlotResult',
'AwaitableListWebAppFunctionSecretsSlotResult',
'list_web_app_function_secrets_slot',
]
# NOTE: generated by the Pulumi SDK Generator — prefer regenerating over hand-edits.
@pulumi.output_type
class ListWebAppFunctionSecretsSlotResult:
    """
    Function secrets.
    """
    def __init__(__self__, key=None, trigger_url=None):
        # Falsy values skip the type check by design of the generator.
        if key and not isinstance(key, str):
            raise TypeError("Expected argument 'key' to be a str")
        pulumi.set(__self__, "key", key)
        if trigger_url and not isinstance(trigger_url, str):
            raise TypeError("Expected argument 'trigger_url' to be a str")
        pulumi.set(__self__, "trigger_url", trigger_url)
    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        """
        Secret key.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter(name="triggerUrl")
    def trigger_url(self) -> Optional[str]:
        """
        Trigger URL.
        """
        return pulumi.get(self, "trigger_url")
class AwaitableListWebAppFunctionSecretsSlotResult(ListWebAppFunctionSecretsSlotResult):
    """Awaitable wrapper: __await__ is a generator that yields nothing and
    immediately returns a plain result, so the value works with `await`."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListWebAppFunctionSecretsSlotResult(
            key=self.key,
            trigger_url=self.trigger_url)
def list_web_app_function_secrets_slot(function_name: Optional[str] = None,
                                       name: Optional[str] = None,
                                       resource_group_name: Optional[str] = None,
                                       slot: Optional[str] = None,
                                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppFunctionSecretsSlotResult:
    """
    Function secrets.
    :param str function_name: Function name.
    :param str name: Site name.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str slot: Name of the deployment slot.
    """
    __args__ = dict()
    __args__['functionName'] = function_name
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    __args__['slot'] = slot
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the Azure Native provider token.
    __ret__ = pulumi.runtime.invoke('azure-native:web/v20200901:listWebAppFunctionSecretsSlot', __args__, opts=opts, typ=ListWebAppFunctionSecretsSlotResult).value
    return AwaitableListWebAppFunctionSecretsSlotResult(
        key=__ret__.key,
        trigger_url=__ret__.trigger_url)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
8daeb7d575a3677e6cbd190d314c2986273f7bc5
|
6bd51065a8ecd097e7f80ee3c6acd16a083be350
|
/tensorflow/contrib/framework/__init__.py
|
8421ba7c0423c6ed274f92ba74930822d0171e05
|
[
"Apache-2.0"
] |
permissive
|
cglewis/tensorflow
|
29b50dadbdb599bacd06af960689bc518a472de1
|
6eac524ef63728bdc10c40f95d30c94aede5f4ea
|
refs/heads/master
| 2023-04-07T18:38:29.752739
| 2017-10-31T17:56:48
| 2017-10-31T17:56:48
| 109,033,012
| 0
| 0
|
Apache-2.0
| 2023-04-04T00:37:48
| 2017-10-31T17:54:48
|
C++
|
UTF-8
|
Python
| false
| false
| 2,622
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework utilities.
See the @{$python/contrib.framework} guide.
@@assert_same_float_dtype
@@assert_scalar
@@assert_scalar_int
@@convert_to_tensor_or_sparse_tensor
@@get_graph_from_inputs
@@is_numeric_tensor
@@is_non_decreasing
@@is_strictly_increasing
@@is_tensor
@@reduce_sum_n
@@remove_squeezable_dimensions
@@with_shape
@@with_same_shape
@@deprecated
@@deprecated_args
@@deprecated_arg_values
@@arg_scope
@@add_arg_scope
@@current_arg_scope
@@has_arg_scope
@@arg_scoped_arguments
@@prepend_name_scope
@@strip_name_scope
@@add_model_variable
@@assert_global_step
@@assert_or_get_global_step
@@assign_from_checkpoint
@@assign_from_checkpoint_fn
@@assign_from_values
@@assign_from_values_fn
@@create_global_step
@@filter_variables
@@get_global_step
@@get_or_create_global_step
@@get_local_variables
@@get_model_variables
@@get_name_scope
@@get_trainable_variables
@@get_unique_variable
@@get_variables_by_name
@@get_variables_by_suffix
@@get_variable_full_name
@@get_variables_to_restore
@@get_variables
@@local_variable
@@model_variable
@@variable
@@VariableDeviceChooser
@@zero_initializer
@@load_checkpoint
@@list_variables
@@load_variable
@@init_from_checkpoint
@@load_and_remap_matrix_initializer
@@load_embedding_initializer
@@load_linear_multiclass_bias_initializer
@@load_variable_slot_initializer
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.framework.python.framework import *
from tensorflow.contrib.framework.python.ops import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.framework.ops import prepend_name_scope
from tensorflow.python.framework.ops import strip_name_scope
from tensorflow.python.util.all_util import remove_undocumented
# Symbols allowed to survive the scrub below despite not appearing in the
# module docstring's @@ directives.
_allowed_symbols = ['nest']
# Presumably strips public names not listed in the @@ docstring entries —
# see tensorflow.python.util.all_util.remove_undocumented for exact semantics.
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
4bb739f59e43b16c125bc8bb2a99540f52ebf7a0
|
58e4c3e1302a97e781b5657764fdde3e8dd48708
|
/no_if_required.py
|
0644af70c34260fd2a4fc9ef39a1b2891b776aa4
|
[] |
no_license
|
bgroveben/coursera_LTP_TF
|
05ebf73991f73a98360ffbde685f24f6c68d3968
|
f96bd2d19316713b496979df63d5ebec2161c722
|
refs/heads/master
| 2020-02-26T15:40:04.082856
| 2017-03-01T13:52:01
| 2017-03-01T13:52:01
| 70,072,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
# Booleans are your friend.
# Why do this:
# (Deliberately verbose example — kept as-is to contrast with is_even_bool.)
def is_even(num):
    """ (int) -> bool
    Return whether number is even.
    >>> is_even(1)
    False
    >>> is_even(2)
    True
    """
    if num % 2 == 0:
        return True
    else:
        return False
print(is_even(1))
print(is_even(2))
# When you can do this:
def is_even_bool(num):
    """ (int) -> bool
    Return whether number is even.
    >>> is_even_bool(1)
    False
    >>> is_even_bool(2)
    True
    """
    # The comparison already produces a bool — no if/else needed.
    remainder = num % 2
    return remainder == 0
print(is_even_bool(1))
print(is_even_bool(2))
|
[
"bgroveben@gmail.com"
] |
bgroveben@gmail.com
|
78f2cb1b699f7ef5beaaeed03f0c6df3b2382e73
|
d9a490dc36da08051b2685489a8e6af3d29fa903
|
/gaussNodes.py
|
396b5e58af12eae8c24a9c007102e128565535cc
|
[] |
no_license
|
freephys/numeric-for-engineer
|
403679c3f055164bf8b7097c360ad8bfc2cb9978
|
a98d318e8cdff679cc02a575d32840fa87a4717d
|
refs/heads/master
| 2020-04-16T01:33:43.530839
| 2009-11-28T18:42:12
| 2009-11-28T18:42:12
| 388,559
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
## module gaussNodes
''' x,A = gaussNodes(m,tol=10e-9)
Returns nodal abscissas {x} and weights {A} of
Gauss-Legendre m-point quadrature.
'''
from math import cos,pi
from numarray import zeros,Float64
def gaussNodes(m,tol=10e-9):
    """Return nodal abscissas {x} and weights {A} of Gauss-Legendre
    m-point quadrature.

    Roots of the Legendre polynomial P_m are found by Newton-Raphson
    (at most 30 iterations each) from a cosine-based initial guess;
    the weight at each root follows from the derivative there.

    NOTE(review): written for Python 2 / numarray.  ``(m + 1)/2`` below
    relies on Py2 integer division (under Python 3 it yields a float and
    ``range(nRoots)`` would raise), and ``zeros(..., type=Float64)`` is
    the numarray calling convention -- confirm the target interpreter
    before reuse.
    """
    def legendre(t,m):
        # Evaluate P_m(t) and P_m'(t) via the three-term Bonnet
        # recurrence.  Assumes m >= 2: for m == 1 the loop body never
        # runs and ``p`` would be unbound -- TODO confirm callers
        # never pass m < 2.
        p0 = 1.0; p1 = t
        for k in range(1,m):
            p = ((2.0*k + 1.0)*t*p1 - k*p0)/(1.0 + k )
            p0 = p1; p1 = p
        dp = m*(p0 - t*p1)/(1.0 - t**2)
        return p,dp
    A = zeros((m),type=Float64)
    x = zeros((m),type=Float64)
    nRoots = (m + 1)/2          # Number of non-negative roots (Py2 int division)
    for i in range(nRoots):
        t = cos(pi*(i + 0.75)/(m + 0.5))  # Approximate root (initial guess)
        for j in range(30):
            p,dp = legendre(t,m)          # Newton-Raphson refinement
            dt = -p/dp; t = t + dt
            if abs(dt) < tol:
                # Roots come in +/- pairs; fill both ends of the arrays.
                x[i] = t; x[m-i-1] = -t
                A[i] = 2.0/(1.0 - t**2)/(dp**2) # Eq.(6.25)
                A[m-i-1] = A[i]
                break
    return x,A
|
[
"freephys@gmail.com"
] |
freephys@gmail.com
|
914136d9cb630ffafb4876af0629d835fdf2852d
|
7839d009f3ae0a0c1bc360b86756eba80fce284d
|
/build/rostime/catkin_generated/generate_cached_setup.py
|
35bb758cd62d38d6d794504e7e737ab1ac541e5b
|
[] |
no_license
|
abhat91/ros_osx
|
b5022daea0b6fdaae3489a97fdb1793b669e64f5
|
39cd8a79788d437927a24fab05a0e8ac64b3fb33
|
refs/heads/master
| 2021-01-10T14:43:41.047439
| 2016-03-13T23:18:59
| 2016-03-13T23:18:59
| 53,812,264
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/jade/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/jade/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/opt/ros/jade".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/site-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/Users/adityabhat/Downloads/devel/env.sh')
output_filename = '/Users/adityabhat/Downloads/build/rostime/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"abhat@wpi.edu"
] |
abhat@wpi.edu
|
382fe6b2d5bbcfdf0985153ae02dac0e9df70625
|
9f1039075cc611198a988034429afed6ec6d7408
|
/tensorflow-stubs/python/estimator/canned/metric_keys.pyi
|
10d9b385f98a4f8bc2d53253b192caa053ff2447
|
[] |
no_license
|
matangover/tensorflow-stubs
|
9422fbb1cb3a3638958d621461291c315f9c6ec2
|
664bd995ef24f05ba2b3867d979d23ee845cb652
|
refs/heads/master
| 2020-05-23T12:03:40.996675
| 2019-05-15T06:21:43
| 2019-05-15T06:21:43
| 186,748,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 848
|
pyi
|
# Stubs for tensorflow.python.estimator.canned.metric_keys (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.python.estimator import model_fn as model_fn
from typing import Any as Any
class MetricKeys:
    """Stubbed metric-name constants for canned estimator heads.

    NOTE: auto-generated stub -- ``LOSS``/``LOSS_MEAN`` were inferred as
    ``Any`` by stubgen; the remaining names are plain ``str`` constants.
    The ``*_AT_*`` entries are presumably format-string templates keyed
    by threshold, class id, or output name -- confirm against the
    upstream ``tensorflow.python.estimator.canned.metric_keys`` module.
    """
    LOSS: Any = ...
    LOSS_MEAN: Any = ...
    LOSS_REGULARIZATION: str = ...
    ACCURACY: str = ...
    PRECISION: str = ...
    RECALL: str = ...
    ACCURACY_BASELINE: str = ...
    AUC: str = ...
    AUC_PR: str = ...
    LABEL_MEAN: str = ...
    PREDICTION_MEAN: str = ...
    ACCURACY_AT_THRESHOLD: str = ...
    PRECISION_AT_THRESHOLD: str = ...
    RECALL_AT_THRESHOLD: str = ...
    PROBABILITY_MEAN_AT_CLASS: str = ...
    AUC_AT_CLASS: str = ...
    AUC_PR_AT_CLASS: str = ...
    PROBABILITY_MEAN_AT_NAME: str = ...
    AUC_AT_NAME: str = ...
    AUC_PR_AT_NAME: str = ...
|
[
"matangover@gmail.com"
] |
matangover@gmail.com
|
f26f4be9786d1d6d93f78cd9342425b3d05c88fc
|
99c9ca6edd44a13fd4eabee78625c827cc535ea1
|
/examples/english/english_experiment.py
|
911a51f8596bb8082423e0c2ed51e9a07dfd52f2
|
[
"Apache-2.0"
] |
permissive
|
adeepH/MUDES
|
bbcdcac41b33990545eac769d127a37ba5f4566f
|
f2f7413f9c683194253f7ea9286587bad3058396
|
refs/heads/master
| 2023-04-04T07:26:19.917166
| 2021-04-16T11:51:59
| 2021-04-16T11:51:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,320
|
py
|
import os
import statistics
from sklearn.model_selection import train_test_split
from examples.english.transformer_configs import transformer_config, MODEL_TYPE, MODEL_NAME, LANGUAGE_FINETUNE, \
language_modeling_args, TEMP_DIRECTORY
from mudes.algo.evaluation import f1
from mudes.algo.mudes_model import MUDESModel
from mudes.algo.language_modeling import LanguageModelingModel
from mudes.algo.predict import predict_spans
from mudes.algo.preprocess import read_datafile, format_data, format_lm, read_test_datafile
import torch
if not os.path.exists(TEMP_DIRECTORY):
os.makedirs(TEMP_DIRECTORY)
train = read_datafile('examples/english/data/tsd_train.csv')
dev = read_datafile('examples//english/data/tsd_trial.csv')
test = read_test_datafile('examples//english/data/tsd_test.csv')
if LANGUAGE_FINETUNE:
train_list = format_lm(train)
dev_list = format_lm(dev)
complete_list = train_list + dev_list
lm_train = complete_list[0: int(len(complete_list)*0.8)]
lm_test = complete_list[-int(len(complete_list)*0.2):]
with open(os.path.join(TEMP_DIRECTORY, "lm_train.txt"), 'w') as f:
for item in lm_train:
f.write("%s\n" % item)
with open(os.path.join(TEMP_DIRECTORY, "lm_test.txt"), 'w') as f:
for item in lm_test:
f.write("%s\n" % item)
model = LanguageModelingModel("auto", MODEL_NAME, args=language_modeling_args, use_cuda=torch.cuda.is_available())
model.train_model(os.path.join(TEMP_DIRECTORY, "lm_train.txt"), eval_file=os.path.join(TEMP_DIRECTORY, "lm_test.txt"))
MODEL_NAME = language_modeling_args["best_model_dir"]
train_df = format_data(train)
tags = train_df['labels'].unique().tolist()
model = MUDESModel(MODEL_TYPE, MODEL_NAME, labels=tags, args=transformer_config)
if transformer_config["evaluate_during_training"]:
train_df, eval_df = train_test_split(train_df, test_size=0.1, shuffle=False)
model.train_model(train_df, eval_df=eval_df)
else:
model.train_model(train_df)
model = MUDESModel(MODEL_TYPE, transformer_config["best_model_dir"], labels=tags, args=transformer_config)
scores = []
for n, (spans, text) in enumerate(dev):
predictions = predict_spans(model, text)
score = f1(predictions, spans)
scores.append(score)
print('avg F1 %g' % statistics.mean(scores))
|
[
"rhtdranasinghe@gmail.com"
] |
rhtdranasinghe@gmail.com
|
7d7114b73f7531d5ead27980f9e0b3608c42a9a3
|
c64dd4b7f67d1f3c6ade8404831676a3652963e4
|
/dask_drmaa/sge.py
|
0ec11dacb016ef01325f45ed1814c9faf8b51b02
|
[] |
no_license
|
mrocklin/dask-drmaa
|
6921a59bf29fd2c5e082dd0aad9bdf1f1e0f1806
|
71bd87c8c11a759f2495139e3b613421e7ba4986
|
refs/heads/master
| 2021-05-01T20:29:12.752914
| 2017-01-18T13:20:26
| 2017-01-18T13:20:26
| 79,346,877
| 3
| 0
| null | 2017-01-18T14:18:45
| 2017-01-18T14:18:45
| null |
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
from .core import DRMAACluster, get_session
class SGECluster(DRMAACluster):
    """DRMAA cluster specialised for Sun Grid Engine (SGE).

    Translates worker resource requests (memory, cpus) into SGE native
    specification flags (``-l h_vmem``, ``-l h_rt``) and dask-worker
    command-line arguments.
    """
    # Defaults applied when createJobTemplate() is called without
    # explicit memory settings.
    default_memory = None
    default_memory_fraction = 0.6
    def createJobTemplate(self, nativeSpecification='', cpus=1, memory=None,
                          memory_fraction=None):
        """Build a DRMAA job template for one dask worker.

        :param nativeSpecification: extra SGE flags appended to the
            cluster-level native specification
        :param cpus: threads per worker (worker is started with
            ``--nprocs 1 --nthreads cpus``)
        :param memory: requested memory in bytes (``h_vmem`` is derived
            as ``memory / 1e9`` GB -- presumably GB, confirm units)
        :param memory_fraction: fraction of ``memory`` passed to the
            worker as ``--memory-limit``
        """
        memory = memory or self.default_memory
        memory_fraction = memory_fraction or self.default_memory_fraction
        args = self.args
        ns = self.nativeSpecification
        if nativeSpecification:
            ns = ns + nativeSpecification
        if memory:
            args = args + ['--memory-limit', str(memory * memory_fraction)]
            # NOTE(review): this line uses a hard-coded 0.8 rather than
            # memory_fraction -- presumably deliberate headroom; confirm.
            args = args + ['--resources', 'memory=%f' % (memory * 0.8)]
            ns += ' -l h_vmem=%dG' % int(memory / 1e9)  # / cpus
        if cpus:
            args = args + ['--nprocs', '1', '--nthreads', str(cpus)]
            # ns += ' -l TODO=%d' % (cpu + 1)
        ns += ' -l h_rt={}'.format(self.max_runtime)
        wt = get_session().createJobTemplate()
        wt.jobName = self.jobName
        wt.remoteCommand = self.remoteCommand
        wt.args = args
        wt.outputPath = self.outputPath
        wt.errorPath = self.errorPath
        wt.nativeSpecification = ns
        return wt
|
[
"mrocklin@gmail.com"
] |
mrocklin@gmail.com
|
d2c7ab03478503220cdf8c286f1feb0daed10e8a
|
cad5b92686d48a2e06766b5d3d671eb41083b825
|
/microcosm_pubsub/tests/test_decorators.py
|
b0e88c4ccf382cd9d7abde003189d5cc6f2b313e
|
[
"Apache-2.0"
] |
permissive
|
lior001/microcosm-pubsub
|
8166b4596c04d78330f2ceca31f2827d272ec6ae
|
eeea8409c1f89a6c420fdf42afcc92b1d69d0e11
|
refs/heads/develop
| 2020-12-31T00:09:54.584041
| 2017-02-08T18:51:23
| 2017-02-08T18:51:23
| 86,563,543
| 0
| 0
| null | 2017-03-29T09:29:54
| 2017-03-29T09:29:54
| null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
"""
Decorator tests.
"""
from hamcrest import (
assert_that,
equal_to,
instance_of,
is_,
)
from marshmallow import fields, Schema
from microcosm.api import create_object_graph
from microcosm_pubsub.decorators import handles, schema
@schema
class TestSchema(Schema):
MEDIA_TYPE = "test"
test = fields.String()
@handles(TestSchema)
def noop_handler(message):
return True
class TestDecorators(object):
    """Verify that the @schema and @handles decorators register their
    targets into the microcosm object graph's registries."""
    def setup(self):
        # Fresh object graph per test; wiring the two registry
        # components collects the decorated schema/handler above.
        self.graph = create_object_graph("test")
        self.graph.use(
            "pubsub_message_schema_registry",
            "sqs_message_handler_registry",
        )
    def test_schema_decorators(self):
        # @schema registered TestSchema under its MEDIA_TYPE.
        assert_that(
            self.graph.pubsub_message_schema_registry[TestSchema.MEDIA_TYPE].schema,
            is_(instance_of(TestSchema)),
        )
    def test_handles_decorators(self):
        # @handles(TestSchema) mapped the media type to noop_handler.
        assert_that(
            self.graph.sqs_message_handler_registry[TestSchema.MEDIA_TYPE],
            is_(equal_to(noop_handler)),
        )
|
[
"jesse.myers@globality.com"
] |
jesse.myers@globality.com
|
c0433ae54e1875d6032f2bb5e76a991006e302f1
|
a867b1c9da10a93136550c767c45e0d8c98f5675
|
/LC_yelp_14_Longest_Common_Prefix.py
|
d27e5697bb4aff657ed6b1ef3ff7795167d9a246
|
[] |
no_license
|
Omkar02/FAANG
|
f747aacc938bf747129b8ff35b6648fb265d95b6
|
ee9b245aa83ea58aa67954ab96442561dbe68d06
|
refs/heads/master
| 2023-03-25T19:45:08.153403
| 2021-03-28T07:13:08
| 2021-03-28T07:13:08
| 280,783,785
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
# import __main__ as main
# from Helper.TimerLogger import CodeTimeLogging
# fileName = main.__file__
# fileName = fileName.split('\\')[-1]
# CodeTimeLogging(Flag='F', filename=fileName, Tag='String', Difficult='Easy')
def longoestCommonPrefix(strs):
    """Return the longest common prefix of the given strings.

    Keeps the original (unusual) contract: returns -1 instead of an
    empty string when there is no common prefix (including for an
    empty input list).

    Bug fix: the original kept appending characters from *any* column
    whose letters all matched, even after a mismatching column, so
    ["abc", "xbc"] wrongly produced "bc".  The scan must stop at the
    first column that differs.
    """
    ans = ''
    # zip(*strs) yields the aligned character columns, truncated to the
    # shortest string -- exactly the candidate prefix positions.
    for column in zip(*strs):
        if len(set(column)) != 1:
            break  # first mismatch ends the common prefix
        ans += column[0]
    return ans if ans else -1
strs = ["flower", "flow", "flight"]
# strs = ["dog", "racecar", "car"]
print(longoestCommonPrefix(strs))
|
[
"omkarjoshi4031@live.com"
] |
omkarjoshi4031@live.com
|
dcffb14f17f4fb9194a97de500c404736ef0cec9
|
edfd1db2b48d4d225bc58be32fbe372a43415112
|
/3. Airflow Fundamentals 3/exercises/lesson3.exercise6.py
|
e5c543342b90982555c56e162aa19390c2e9af9f
|
[] |
no_license
|
rwidjojo/airflow-training
|
ed83cb9e97ca85ef06de1426f2f41014881a1f22
|
ac82040d8ddc3859df5576eee08d397e824016f1
|
refs/heads/main
| 2023-08-12T21:01:17.672059
| 2021-01-04T09:17:48
| 2021-01-04T09:17:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
import airflow
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash_operator import BashOperator
owner = 'john_doe' # Replace with your short name
default_args = {
'owner': owner,
'depends_on_past': False,
'start_date': days_ago(2),
}
dag = DAG(
dag_id=f'{owner}.lesson3.excercise6',
start_date=airflow.utils.dates.days_ago(3),
schedule_interval=None,
)
print_template = BashOperator(
task_id="print_template",
bash_command='echo "execution date is {{ ts }} with year {{ execution_date.year }} and month {{ \'{:02}\'.format(execution_date.month) }}"',
dag=dag,
)
|
[
"nurcahyopujo@gmail.com"
] |
nurcahyopujo@gmail.com
|
3e4ac973d2c8a00ad85ba4f40d23f66a548805d7
|
e89f44632effe9ba82b940c7721cad19a32b8a94
|
/text2shorthand/shorthand/svsd/nakatta.py
|
23f660a4c9c066fff1b7352e466d935597403401
|
[] |
no_license
|
Wyess/text2shorthand
|
3bcdb708f1d7eeb17f9ae3181c4dd70c65c8986e
|
5ba361c716178fc3b7e68ab1ae724a57cf3a5d0b
|
refs/heads/master
| 2020-05-17T14:52:11.369058
| 2019-08-20T12:50:00
| 2019-08-20T12:50:00
| 183,776,467
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,870
|
py
|
from ..svsd.char import SvsdChar
from text2shorthand.common.point import Point as P, PPoint as PP
import pyx
from pyx.metapost.path import (
beginknot,
knot,
endknot,
smoothknot,
tensioncurve,
controlcurve,
curve)
class CharNakatta(SvsdChar):
    """Shorthand character for the kana sequence "なかった" (nakatta).

    Declares stroke model 'NER10SWL5UNR2' with head type 'NER' and tail
    type 'SER'.  All ``path_*`` class methods below are unimplemented
    placeholders -- presumably to be filled in with pyx/metapost curve
    definitions like the other SVSD characters; confirm before use.
    """
    def __init__(self, name='なかった', kana='nakatta',
                 model='NER10SWL5UNR2', head_type='NER', tail_type='SER', soundmark=''):
        super().__init__(name, kana, model, head_type, tail_type, soundmark)
        # No head ligatures defined for this character (yet).
        self.head_ligature = {}
        #self.tail_ligature = {}
    # Path builders for each head/tail variant of the base stroke
    # NER -> SWL -> UNR.  Each currently returns None (``pass``).
    @classmethod
    def path_NERSWLUNR(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRe(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRer(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRel(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRne(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRner(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRnel(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRs(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRsl(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRsr(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRse(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRser(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRsel(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRsw(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRswr(cls, ta=None, **kwargs):
        pass
    @classmethod
    def path_NERSWLUNRswl(cls, ta=None, **kwargs):
        pass
|
[
"diyhacker@mail.goo.ne.jp"
] |
diyhacker@mail.goo.ne.jp
|
c1d36397e7e64ebf831efb5633fa13e307f25556
|
f883b2ccb4bf6d527f31fca1f1748e8aa5f17f3a
|
/web/app/social_auth/urls.py
|
2a6e7402dd2d88391b4a3b7b449e536511f5e311
|
[] |
no_license
|
nikolaykhodov/liketools
|
a710faa7fe31cd72df8299829bcc89d16a8d2721
|
65b4a046c3180eec3af0fa709f23bb12975dfe1c
|
refs/heads/master
| 2021-01-10T18:35:08.586890
| 2014-03-31T12:41:50
| 2014-03-31T12:41:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from social_auth.views import VkAuthView, LoginView, LogoutView, KvSetView
# URL routes for the social-auth app: login/logout views, the VK
# OAuth endpoint, and a key-value setter endpoint.
urlpatterns = patterns('',
    url(r'^login/$', LoginView.as_view(), name='social_auth_login'),
    url(r'^logout/$', LogoutView.as_view(), name='social_auth_logout'),
    url(r'^vk/$', VkAuthView.as_view(), name='social_auth_vk'),
    url(r'^keyvalue_set/$', KvSetView.as_view(), name='keyvalue_set'),
)
|
[
"nkhodov@gmail.com"
] |
nkhodov@gmail.com
|
65c5dc7ecc967754e6eb46de86e6f915461f2ea1
|
d0af9f544b76e1df4f8ffb6c65a3da1fe13c5871
|
/setup.py
|
c10007500e179356be10008723c3f04774009beb
|
[
"MIT"
] |
permissive
|
vanadium23/doc484
|
472a90ad08352891aa3ed9526375aebad71f3d16
|
ff8058f07e6cba8f26e7ce48ef4dd42203dc065a
|
refs/heads/master
| 2020-03-29T20:54:44.503542
| 2018-09-25T22:55:08
| 2018-09-25T22:55:08
| 150,338,332
| 0
| 0
|
MIT
| 2018-09-25T22:48:16
| 2018-09-25T22:48:16
| null |
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
from setuptools import setup, find_packages
import os.path
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
    """Return the text content of the file at HERE/<parts...>."""
    path = os.path.join(HERE, *parts)
    with open(path) as fh:
        return fh.read()
setup(
name="doc484",
version="0.2.0",
author="Chad Dombrova",
description="Generate PEP 484 type comments from docstrings",
long_description=read("README.rst"),
license="MIT",
keywords=["mypy", "typing", "pep484", "docstrings", "annotations"],
url="https://github.com/chadrik/doc484",
packages=find_packages(),
entry_points={
'console_scripts': ['doc484=doc484.__main__:main'],
},
install_requires=[
"docutils", # only required for rest format
],
extras_require={
"tests": [
"coverage",
"pytest==3.6.2",
"tox==2.7.0",
],
},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
|
[
"chadrik@gmail.com"
] |
chadrik@gmail.com
|
fad7b7f80e12f72ca4a0827c794f7b6a156be69f
|
7eb606a7957e5500f163c93dc4b19418cf9cf335
|
/examples/lbfgs/model.py
|
d68967e5e2c64b3f598e36263ab1e7edc2e6d907
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
ludwig-ai/ludwig
|
024f74da86567a57ec8e30efcb4600f0c52333a1
|
e1d023e41606c9b76b35e1d231c2f13368a30eca
|
refs/heads/master
| 2023-09-03T08:07:32.978301
| 2023-09-01T19:39:32
| 2023-09-01T19:39:32
| 163,346,054
| 2,567
| 285
|
Apache-2.0
| 2023-09-14T20:34:52
| 2018-12-27T23:58:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
import logging
import pandas as pd
from ludwig.api import LudwigModel
from ludwig.datasets import amazon_employee_access_challenge
df = amazon_employee_access_challenge.load()
model = LudwigModel(config="config.yaml", logging_level=logging.INFO)
training_statistics, preprocessed_data, output_directory = model.train(
df,
skip_save_processed_input=True,
skip_save_log=True,
skip_save_progress=True,
skip_save_training_description=True,
skip_save_training_statistics=True,
)
# Predict on unlabeled test
config = model.config
config["preprocessing"] = {}
model.config = config
unlabeled_test = df[df.split == 2].reset_index(drop=True)
preds, _ = model.predict(unlabeled_test)
# Save predictions to csv
action = preds.ACTION_probabilities_True
submission = pd.merge(unlabeled_test.reset_index(drop=True).id.astype(int), action, left_index=True, right_index=True)
submission.rename(columns={"ACTION_probabilities_True": "Action", "id": "Id"}, inplace=True)
submission.to_csv("submission.csv", index=False)
|
[
"noreply@github.com"
] |
ludwig-ai.noreply@github.com
|
a5124967f629b267a5314e52eda661da43dc0c9a
|
aba1d17ddc7d7ad9f49e2d6d87600e9e0387ba14
|
/mi/dataset/driver/dosta_abcdjm/cspp/dosta_abcdjm_cspp_telemetered_driver.py
|
6f3ee50a7855782d946f895365a65c2d76667fb4
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
oceanobservatories/mi-instrument
|
3ad880c1366b1a8461fc9085768df0e9ddeb6ef5
|
bdbf01f5614e7188ce19596704794466e5683b30
|
refs/heads/master
| 2023-07-23T07:28:36.091223
| 2023-07-14T15:54:49
| 2023-07-14T15:54:49
| 24,165,325
| 1
| 32
|
BSD-2-Clause
| 2023-07-13T01:39:22
| 2014-09-17T22:53:22
|
Python
|
UTF-8
|
Python
| false
| false
| 2,147
|
py
|
"""
@package mi.dataset.driver.dosta_abcdjm.cspp
@file mi.dataset.driver.dosta_abcdjm.cspp.dosta_abcdjm_cspp_telemetered_driver.py
@author Emily Hahn
@brief Telemetered driver for the dosta series abcdjm instrument through cspp
"""
__author__ = 'ehahn'
from mi.core.log import get_logger
log = get_logger()
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.cspp_base import METADATA_PARTICLE_CLASS_KEY, DATA_PARTICLE_CLASS_KEY
from mi.dataset.parser.dosta_abcdjm_cspp import DostaAbcdjmCsppParser, \
DostaAbcdjmCsppMetadataTelemeteredDataParticle, \
DostaAbcdjmCsppInstrumentTelemeteredDataParticle
from mi.core.versioning import version
@version("15.7.1")
def parse(unused, source_file_path, particle_data_handler):
    """
    This is the method called by Uframe.
    :param unused: unused driver argument (kept for the Uframe call signature)
    :param source_file_path: full path and filename of the file to be parsed
    :param particle_data_handler: Java object that consumes the parser output
    :return: the same particle_data_handler, after the file stream is processed
    """
    # NOTE(review): 'rU' (universal-newline) mode is deprecated since
    # Python 3.4 and removed in 3.11 -- plain 'r' is the Python 3
    # equivalent; confirm the target interpreter before changing.
    with open(source_file_path, 'rU') as stream_handle:
        # create an instance of the concrete driver class defined below
        driver = DostaAbcdjmCsppTelemeteredDriver(unused, stream_handle, particle_data_handler)
        driver.processFileStream()
    return particle_data_handler
class DostaAbcdjmCsppTelemeteredDriver(SimpleDatasetDriver):
    """
    Concrete telemetered driver: only needs to override _build_parser
    of SimpleDatasetDriver to supply the parser configuration.
    """
    def _build_parser(self, stream_handle):
        # Map the cspp_base particle-class keys to the telemetered
        # particle variants.  PARTICLE_CLASS is set to None because the
        # classes dict carries the actual classes -- presumably the
        # parser ignores it in that case; confirm in
        # DostaAbcdjmCsppParser.
        parser_config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.dosta_abcdjm_cspp',
            DataSetDriverConfigKeys.PARTICLE_CLASS: None,
            DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
                METADATA_PARTICLE_CLASS_KEY: DostaAbcdjmCsppMetadataTelemeteredDataParticle,
                DATA_PARTICLE_CLASS_KEY: DostaAbcdjmCsppInstrumentTelemeteredDataParticle,
            }
        }
        return DostaAbcdjmCsppParser(parser_config, stream_handle, self._exception_callback)
|
[
"petercable@gmail.com"
] |
petercable@gmail.com
|
b9f78075d182ca9e57ef766de03b49e5e67b83e3
|
5ae3bc1920fafc33693cdfa3928a48158aa6f725
|
/339/339.py
|
3ae587496c391a69889871704c55b71a5fa45463
|
[] |
no_license
|
sjzyjc/leetcode
|
2d0764aec6681d567bffd8ff9a8cc482c44336c2
|
5e09a5d36ac55d782628a888ad57d48e234b61ac
|
refs/heads/master
| 2021-04-03T08:26:38.232218
| 2019-08-15T21:54:59
| 2019-08-15T21:54:59
| 124,685,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,912
|
py
|
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger:
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a single integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class Solution:
    def depthSum(self, nestedList):
        """
        :type nestedList: List[NestedInteger]
        :rtype: int

        Sum every integer weighted by its nesting depth (top level = 1).
        """
        if not nestedList:
            return 0
        return sum(self.dfs(element, 1) for element in nestedList)
    def dfs(self, nestedList, depth):
        # A bare integer contributes value * depth; a list recurses one
        # level deeper and sums its children.
        if nestedList.isInteger():
            return depth * nestedList.getInteger()
        return sum(self.dfs(child, depth + 1) for child in nestedList.getList())
|
[
"jcyang@MacBook-Air.local"
] |
jcyang@MacBook-Air.local
|
264486e6a67a5bd97d1e2a4f5fe5a9d2793e581e
|
60ca69e2a4c6b05e6df44007fd9e4a4ed4425f14
|
/beginner_contest/072/C.py
|
080a37ff02695427c7499db8c51223bd96de0bd5
|
[
"MIT"
] |
permissive
|
FGtatsuro/myatcoder
|
12a9daafc88efbb60fc0cd8840e594500fc3ee55
|
25a3123be6a6311e7d1c25394987de3e35575ff4
|
refs/heads/master
| 2021-06-13T15:24:07.906742
| 2021-05-16T11:47:09
| 2021-05-16T11:47:09
| 195,441,531
| 0
| 0
|
MIT
| 2021-05-16T11:47:10
| 2019-07-05T16:47:58
|
Python
|
UTF-8
|
Python
| false
| false
| 320
|
py
|
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
# AtCoder ABC072 C: choose X to maximise the number of elements whose
# value lies in {X-1, X, X+1}.  Strategy: bucket-count the values, then
# take the best sum over every window of three consecutive buckets.
n = int(input())  # element count (read but implied by the list below)
a = list(map(int, input().split()))
# t[v] = number of occurrences of value v (values assumed >= 0 --
# guaranteed by the problem constraints).
t = [0] * (max(a) + 1)
for v in a:
    t[v] += 1
if max(a) <= 1:
    # All values are 0 or 1, so X = 1 covers every element at once.
    print(sum(t))
    sys.exit(0)
ans = 0
# Slide a width-3 window over the buckets; each window is one choice
# of X (= i + 1).
for i in range(0, max(a) - 1):
    ans = max(ans, t[i] + t[i+1] + t[i+2])
print(ans)
|
[
"204491+FGtatsuro@users.noreply.github.com"
] |
204491+FGtatsuro@users.noreply.github.com
|
0c52af3d9af2fa22e731c5bf98e9226c2a7b2245
|
b2b79cc61101ddf54959b15cf7d0887d114fb4e5
|
/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_get_script_sql.py
|
bfd9c1ad7926ca39adec0ae5ed46e473f8192970
|
[
"PostgreSQL"
] |
permissive
|
99Percent/pgadmin4
|
8afe737eb2ec1400ab034ad1d8a4f7c4ba4c35c8
|
5e0c113c7bc4ffefbec569e7ca5416d9acf9dd8a
|
refs/heads/master
| 2021-10-10T20:08:48.321551
| 2021-09-30T12:51:43
| 2021-09-30T12:51:43
| 165,702,958
| 0
| 0
|
NOASSERTION
| 2019-01-14T17:18:40
| 2019-01-14T17:18:39
| null |
UTF-8
|
Python
| false
| false
| 4,072
|
py
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2021, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
from unittest.mock import patch
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as tables_utils
class TableGetScriptSqlTestCase(BaseTestGenerator):
"""This class will add new collation under schema node."""
url = '/browser/table/'
# Generates scenarios
scenarios = utils.generate_scenarios("table_get_script_sql",
tables_utils.test_cases)
def setUp(self):
# Load test data
self.data = self.test_data
# Update url
self.url = self.url + self.add_to_url
# Create db connection
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database to add a table.")
# Create schema
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema to add a table.")
# Create table
self.table_name = "test_table_get_%s" % (str(uuid.uuid4())[1:8])
if "query" in self.inventory_data:
custom_query = self.inventory_data["query"]
self.table_id = tables_utils.create_table(self.server,
self.db_name,
self.schema_name,
self.table_name,
custom_query)
else:
self.table_id = tables_utils.create_table(self.server,
self.db_name,
self.schema_name,
self.table_name)
def runTest(self):
"""This function will delete added table under schema node."""
if self.is_positive_test:
response = tables_utils.api_get(self)
# Assert response
utils.assert_status_code(self, response)
else:
if self.mocking_required:
with patch(self.mock_data["function_name"],
side_effect=eval(self.mock_data["return_value"])):
if self.is_list:
response = tables_utils.api_get(self, "")
else:
response = tables_utils.api_get(self)
else:
if 'table_id' in self.data:
self.table_id = self.data['table_id']
response = tables_utils.api_get(self)
# Assert response
utils.assert_status_code(self, response)
utils.assert_error_message(self, response)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id)
|
[
"akshay.joshi@enterprisedb.com"
] |
akshay.joshi@enterprisedb.com
|
48fae7769117066bc7dbba45df7955795e600155
|
1677eaad65da601a3ac34bd6648c973ffd23c5a9
|
/test/test_payment_intent.py
|
96a16f7f0e080de4c3970d1af68af0c32eebc622
|
[] |
no_license
|
jeffkynaston/sdk-spike-python
|
dc557cc1557387f8a126cd8e546201d141de535e
|
f9c65f578abb801ffe5389b2680f9c6ed1fcebd3
|
refs/heads/main
| 2023-07-10T00:58:13.864373
| 2021-08-05T21:38:07
| 2021-08-05T21:38:07
| 393,175,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
"""
Plastiq Public API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.payer_id import PayerId
from openapi_client.model.payment_details import PaymentDetails
from openapi_client.model.payment_intent_fees import PaymentIntentFees
from openapi_client.model.payment_method_id import PaymentMethodId
from openapi_client.model.recipient_id import RecipientId
globals()['PayerId'] = PayerId
globals()['PaymentDetails'] = PaymentDetails
globals()['PaymentIntentFees'] = PaymentIntentFees
globals()['PaymentMethodId'] = PaymentMethodId
globals()['RecipientId'] = RecipientId
from openapi_client.model.payment_intent import PaymentIntent
class TestPaymentIntent(unittest.TestCase):
    """PaymentIntent unit test stubs (generated by OpenAPI Generator)."""
    def setUp(self):
        # No fixtures required for the generated stub.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testPaymentIntent(self):
        """Test PaymentIntent"""
        # FIXME: construct object with mandatory attributes with example values
        # model = PaymentIntent()  # noqa: E501
        pass
|
[
"jeff.kynaston@plastiq.com"
] |
jeff.kynaston@plastiq.com
|
c4b8382cde9c3442aad8576c605f4e3165a4187c
|
cb3583cc1322d38b1ee05cb1c081e0867ddb2220
|
/home/0024_auto_20210409_1103.py
|
6dfdc8917586768f05e72645b5ee96b68f7f94cc
|
[
"MIT"
] |
permissive
|
iamgaddiel/codeupblood
|
9e897ff23dedf5299cb59fd6c44d9bd8a645e9c6
|
a0aa1725e5776d80e083b6d4e9e67476bb97e983
|
refs/heads/main
| 2023-05-07T23:34:27.475043
| 2021-04-24T20:49:08
| 2021-04-24T20:49:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,718
|
py
|
# Generated by Django 3.1.6 on 2021-04-09 18:03
from django.db import migrations, models
import tagulous.models.fields
import tagulous.models.models
class Migration(migrations.Migration):
dependencies = [
('home', '0023_auto_20210409_0900'),
]
operations = [
migrations.AlterField(
model_name='partner',
name='class_id',
field=models.CharField(default='pMNit', max_length=150),
),
migrations.AlterField(
model_name='sponsor',
name='class_id',
field=models.CharField(default='eLZfH', max_length=150),
),
migrations.CreateModel(
name='Tagulous_Blog_tags',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('slug', models.SlugField()),
('count', models.IntegerField(default=0, help_text='Internal counter of how many times this tag is in use')),
('protected', models.BooleanField(default=False, help_text='Will not be deleted when the count reaches 0')),
],
options={
'ordering': ('name',),
'abstract': False,
'unique_together': {('slug',)},
},
bases=(tagulous.models.models.BaseTagModel, models.Model),
),
migrations.AlterField(
model_name='blog',
name='tags',
field=tagulous.models.fields.TagField(_set_tag_meta=True, help_text='Enter a comma-separated tag string', to='home.Tagulous_Blog_tags'),
),
]
|
[
"www.spbiology@gmail.com"
] |
www.spbiology@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.