| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
cff524845cd8cd7c51e340615ab03f93dd2e8f56
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2464/60624/263253.py
|
43b61c2b54734e6e699622b0d037ab4ce3242c40
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
def func8():
target = int(input())
nums = list(map(int, input().split(",")))
low, high, res = 0, len(nums), 0
def helper(size):
sum_size = 0
for i in range(len(nums)):
sum_size += nums[i]
if i >= size:
sum_size -= nums[i-size]
if sum_size >= target:
return True
return False
while low <= high:
mid = (low+high)//2
if helper(mid):
res = mid
high = mid-1
else:
low = mid+1
print(res)
return
func8()
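A quick sanity check of func8's binary search over window length (a minimal sketch; the sample input is invented for illustration):

def min_window_len(target, nums):
    # same test as func8's helper, parameterized so it can be called directly
    def ok(size):
        window_sum = 0
        for i, v in enumerate(nums):
            window_sum += v
            if i >= size:
                window_sum -= nums[i - size]
            if i >= size - 1 and window_sum >= target:
                return True
        return False
    low, high, res = 1, len(nums), 0
    while low <= high:
        mid = (low + high) // 2
        if ok(mid):            # a window of length mid suffices; try shorter
            res, high = mid, mid - 1
        else:                  # no window of length mid reaches target; go longer
            low = mid + 1
    return res

assert min_window_len(7, [2, 3, 1, 2, 4, 3]) == 2  # shortest qualifying window is [4, 3]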
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
71095ab94c5e4ddec9a1d84902c4dd9e3bef9571
|
8d90e2eae476ecbe88d46ef2f03fe7ba92cc733b
|
/Programming Basics with Python/For-cycle/For_C_lab_ex6_sum_of_vowels.py
|
39e46fed5101d534882c2b874c0d0b2764c03068
|
[] |
no_license
|
KaterinaMutafova/SoftUni
|
c3f8bae3c2bf7bd4038da010ca03edc412672468
|
7aeef6f25c3479a8d677676cb1d66df20ca0d411
|
refs/heads/main
| 2023-03-08T10:53:49.748153
| 2021-02-19T15:55:13
| 2021-02-19T15:55:13
| 317,597,660
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
text = input()
result = 0
for i in text:
if i == "a":
result += 1
elif i == "e":
result += 2
elif i == "i":
result += 3
elif i == "o":
result += 4
elif i == "u":
result += 5
print(result)
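The if/elif ladder above is a fixed lookup table; an equivalent dict-based version (a sketch with an invented sample string in place of input()):

vowel_scores = {"a": 1, "e": 2, "i": 3, "o": 4, "u": 5}
text = "beautiful"  # sample input
print(sum(vowel_scores.get(ch, 0) for ch in text))  # e+a+u+i+u = 2+1+5+3+5 = 16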
|
[
"noreply@github.com"
] |
KaterinaMutafova.noreply@github.com
|
67d1221bfdb2b6a345db86fe818fdbf3895b92fb
|
1c72aa6d53c886d8fb8ae41a3e9b9c6c4dd9dc6f
|
/Semester 1/Project submissions/Lee Eldridge/Excercise Weeks 1-9 - Lee Eldridge/Week 7/url_reader.py
|
e2cc32fffbe071db0b3e936c95c54fccaf8b2641
|
[] |
no_license
|
codebubb/python_course
|
74761ce3189d67e3aff964c056aeab27d4e94d4a
|
4a6ed4a64e6a726d886add8364c65956d5053fc2
|
refs/heads/master
| 2021-01-11T03:06:50.519208
| 2016-07-29T10:47:12
| 2016-10-17T10:42:29
| 71,114,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
import time
import urllib2
open_file = urllib2.urlopen('http://wordpress.org/plugins/about/readme.txt')  # urlopen takes no mode argument; a second positional arg would be sent as POST data
read = open_file.read()
file_list = read.split()
print read
print ""
print "Hello, currently doing some analysis, please wait..."
time.sleep(3)
print "There are currently:", len(file_list), "words in the above text"
print "There are currently:", len(set(file_list)), "unique words in the above text"
count = 0
for e in file_list:
count = len(e) + count
print "There are currently:", count, "letters in the above text."
|
[
"jpbubb82@gmail.com"
] |
jpbubb82@gmail.com
|
2ae47ede9827fbde591f754ac58ffc0dc2fac0d9
|
1ac99f8065a2646bdb8ea9003fd5930341fb0cf4
|
/Exam2/3.py
|
096cd041d3b9738fdbd0949fc47c080513e7b1e3
|
[] |
no_license
|
krishnanunni-pr/Pyrhon-Django
|
894547f3d4d22dce3fff14e88815122c12c145b5
|
c59471f947ceb103bb27a19e8a2a160e8ada529b
|
refs/heads/master
| 2023-07-29T19:40:38.199104
| 2021-09-09T18:12:24
| 2021-09-09T18:12:24
| 385,128,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
# 3. Create a Book class with instance Library_name, book_name, author, pages?
class Book:
def bookdetails(self,bkname,author,pages,library_name):
self.library_name=library_name
self.bkname=bkname
self.author=author
self.pages=pages
print("Book name :",bkname)
print("Authour :",author)
print("Number of pages :",pages)
print("Library section :",library_name)
obj=Book()
bname=input("Enter the name of book :")
author=input("Name of authour :")
pageno=int(input("Number of pages :"))
library_name=input("Enter library name :")
obj.bookdetails(bname,author,pageno,library_name)
|
[
"krishna@gmail.com"
] |
krishna@gmail.com
|
3b83024a4da9cdde39c50333316b03838417d3a9
|
eac55c1fbbf83f08eabdfd5337ae54ca24ed655b
|
/build/velodyne/velodyne_msgs/catkin_generated/pkg.develspace.context.pc.py
|
85c1b12f52780f9af793f52565600c24d7496253
|
[] |
no_license
|
codeJRV/velodyne_ws
|
c4271d81de66ee354f9c948aa961f56266e74e55
|
ae684357b2d3f0ddc8a327cd9f625f727d02e145
|
refs/heads/master
| 2021-01-25T14:03:39.322967
| 2018-03-15T23:16:36
| 2018-03-15T23:16:36
| 123,643,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/jrv/Research/Velodyne/velodyne_ws/devel/include".split(';') if "/home/jrv/Research/Velodyne/velodyne_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "velodyne_msgs"
PROJECT_SPACE_DIR = "/home/jrv/Research/Velodyne/velodyne_ws/devel"
PROJECT_VERSION = "1.3.0"
|
[
"email.jrv@gmail.com"
] |
email.jrv@gmail.com
|
c7f1fa58dbac7f6e81831d6d8a3c59cdc2507686
|
3899dd3debab668ef0c4b91c12127e714bdf3d6d
|
/venv/Lib/site-packages/tensorflow/python/grappler/cluster.py
|
9f3a130798ca0f57cbdcd49506e0c1d6fe7033db
|
[] |
no_license
|
SphericalPotatoInVacuum/CNNDDDD
|
b2f79521581a15d522d8bb52f81b731a3c6a4db4
|
03c5c0e7cb922f53f31025b7dd78287a19392824
|
refs/heads/master
| 2020-04-21T16:10:25.909319
| 2019-02-08T06:04:42
| 2019-02-08T06:04:42
| 169,691,960
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,139
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A python interface for Grappler clusters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.grappler.costs import op_performance_data_pb2
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.python import pywrap_tensorflow as tf_cluster
from tensorflow.python.framework import errors
class Cluster(object):
"""Grappler Clusters."""
def __init__(self,
allow_soft_placement=True,
disable_detailed_stats=True,
disable_timeline=True,
devices=None):
"""Creates a Cluster.
Args:
allow_soft_placement: If True, TF will automatically fix illegal
placements instead of erroring out if the placement isn't legal.
disable_detailed_stats: If True, detailed statistics will not be
available.
disable_timeline: If True, the timeline information will not be reported.
devices: A list of devices of type device_properties_pb2.NamedDevice.
If None, a device list will be created based on the spec of
the local machine.
"""
self._tf_cluster = None
self._generate_timeline = not disable_timeline
with errors.raise_exception_on_not_ok_status() as status:
if devices is None:
self._tf_cluster = tf_cluster.TF_NewCluster(
allow_soft_placement, disable_detailed_stats, status)
else:
devices_serialized = [device.SerializeToString() for device in devices]
self._tf_cluster = tf_cluster.TF_NewVirtualCluster(
devices_serialized, status)
def Shutdown(self):
if self._tf_cluster is not None:
tf_cluster.TF_ShutdownCluster(self._tf_cluster)
self._tf_cluster = None
def __del__(self):
self.Shutdown()
@property
def tf_cluster(self):
return self._tf_cluster
def ListDevices(self):
"""Returns the list of available hardware devices."""
devices = []
if self._tf_cluster is not None:
ret_from_swig = tf_cluster.TF_ListDevices(self._tf_cluster)
devices = []
for raw_dev in ret_from_swig:
devices.append(device_properties_pb2.NamedDevice.FromString(raw_dev))
return devices
def ListAvailableOps(self):
"""Returns a list of all the available operations (sorted alphatically)."""
return tf_cluster.TF_ListAvailableOps()
def GetSupportedDevices(self, item):
return tf_cluster.TF_GetSupportedDevices(self._tf_cluster, item.tf_item)
def EstimatePerformance(self, device):
"""Estimate the performance of the specified device."""
serialized = device.SerializeToString()
return tf_cluster.TF_EstimatePerformance(serialized)
def MeasureCosts(self, item):
"""Returns the cost of running the specified item.
Args:
item: The item for which to measure the costs.
Returns: The triplet op_perfs, runtime, step_stats.
"""
with errors.raise_exception_on_not_ok_status() as status:
ret_from_swig = tf_cluster.TF_MeasureCosts(
item.tf_item, self._tf_cluster, self._generate_timeline, status)
if ret_from_swig is None:
return None
op_perf_bytes_list, run_time, step_stats_bytes = ret_from_swig
op_perfs = []
for op_perf_bytes in op_perf_bytes_list:
op_perfs.append(
op_performance_data_pb2.OpPerformance.FromString(op_perf_bytes))
return (op_perfs, run_time,
step_stats_pb2.StepStats.FromString(step_stats_bytes))
def DeterminePeakMemoryUsage(self, item):
"""Returns a snapshot of the peak memory usage.
Args:
item: The item for which to measure the costs.
Returns: A hashtable indexed by device name.
"""
with errors.raise_exception_on_not_ok_status() as status:
ret_from_swig = tf_cluster.TF_DeterminePeakMemoryUsage(
item.tf_item, self._tf_cluster, status)
return ret_from_swig
@contextlib.contextmanager
def Provision(allow_soft_placement=True,
disable_detailed_stats=True,
disable_timeline=True,
devices=None):
cluster = Cluster(allow_soft_placement, disable_detailed_stats,
disable_timeline, devices)
yield cluster
cluster.Shutdown()
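Hypothetical usage of the Provision() context manager defined above (a sketch that assumes a TF 1.x build exposing this module; note the generator has no try/finally, so Shutdown() is skipped if the body raises):

from tensorflow.python.grappler import cluster as gcluster

with gcluster.Provision(disable_detailed_stats=False) as c:
    for dev in c.ListDevices():  # NamedDevice protos describing the local machine
        print(dev.name)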
|
[
"a@bogdanov.co"
] |
a@bogdanov.co
|
cc2a32a439a92c92aa61ba4ea571b75e901de399
|
77311ad9622a7d8b88707d7cee3f44de7c8860cb
|
/res/scripts/client/gui/scaleform/daapi/view/meta/fortdatepickerpopovermeta.py
|
206f951b446d8c248b6507b491083faefdebba91
|
[] |
no_license
|
webiumsk/WOT-0.9.14-CT
|
9b193191505a4560df4e872e022eebf59308057e
|
cfe0b03e511d02c36ce185f308eb48f13ecc05ca
|
refs/heads/master
| 2021-01-10T02:14:10.830715
| 2016-02-14T11:59:59
| 2016-02-14T11:59:59
| 51,606,676
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 546
|
py
|
# 2016.02.14 12:40:19 Central Europe (standard time)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/FortDatePickerPopoverMeta.py
from gui.Scaleform.daapi.view.lobby.popover.SmartPopOverView import SmartPopOverView
class FortDatePickerPopoverMeta(SmartPopOverView):
pass
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\meta\fortdatepickerpopovermeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:40:19 Central Europe (standard time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
7b00ee3b92761685a2e32d3a4d48ca7ab9336fda
|
25c0e72ea6889749cb269dfd26a77edfc4207d40
|
/fuzzers/009-xor_b_mux/fuzzer.py
|
10eff1adfe642697d899998d81e1ec56c7552e86
|
[
"0BSD"
] |
permissive
|
whitequark/prjbureau
|
49c2d060ca7b99042fdc751e70f10ad74309975b
|
cbe15e117449c55e7244756f00c3e34e0d92017e
|
refs/heads/main
| 2023-08-16T10:34:53.915942
| 2021-11-27T21:34:41
| 2021-11-27T21:34:41
| 227,539,435
| 44
| 8
|
NOASSERTION
| 2023-08-07T16:12:37
| 2019-12-12T06:48:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,577
|
py
|
from util import database, toolchain, bitdiff, progress
with database.transact() as db:
for device_name, device in db.items():
progress(device_name)
package, pinout = next(iter(device['pins'].items()))
for macrocell_idx, (macrocell_name, macrocell) in enumerate(device['macrocells'].items()):
progress(1)
def run(code):
return toolchain.run(
f"module top(input CLK, output O); "
f"wire Q; TRI tri(Q, 1'b0, O); "
f"{code} "
f"endmodule",
{
'CLK': pinout['C1'],
'ff': str(601 + macrocell_idx),
},
f"{device_name}-{package}")
f_dff = run("DFF ff(.CLK(CLK), .D(1'b0), .Q(Q));")
f_tff = run("TFF ff(.CLK(CLK), .T(1'b0), .Q(Q));")
# The GND choice of XOR B mux is shared with !PT1 and !PT2 choices: if xor_invert
# is off, then it is GND; otherwise: if pt2_mux is xor and xor_a_mux is sum, then
# it is !PT2; if pt1_mux is flb and xor_a_mux is VCC_pt2, then it is !PT1; otherwise
# it is GND. Further, the XOR B mux is linked to FLB: if XOR B mux is !PT1, then FLB
# is always 1, otherwise FLB follows pt1_mux.
macrocell.update({
'xor_b_mux':
bitdiff.describe(1, {
'VCC_pt12': f_dff,
'ff_qn': f_tff
})
})
|
[
"whitequark@whitequark.org"
] |
whitequark@whitequark.org
|
bff60c91bc6b4841943f12e48362e1aa2fbd2a68
|
6f78a4c4896563a52d86eacf49dbb6a358a3646e
|
/hackerrank/python/hackerrank_GreedyFlorist.py
|
16ef36a533bd86a3d30da8b3791ca3b24de10ad2
|
[] |
no_license
|
wj1224/algorithm_solve
|
259c39d2a85ecb2630e089eb0c86cdde9ff3baeb
|
8b0f15b71a4dd8eb40d3c9baee003a0678c3f2aa
|
refs/heads/master
| 2023-08-25T06:14:21.615802
| 2021-10-26T05:00:59
| 2021-10-26T05:00:59
| 219,981,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the getMinimumCost function below.
def getMinimumCost(k, c):
c.sort()
p = dict()
for i in range(k):
p[i] = 0
answer = 0
idx = 0
for i in range(len(c) -1, -1, -1):
if p[idx] == 0:
p[idx] = 1
answer += c[i]
else:
answer += ((p[idx] + 1) * c[i])
p[idx] += 1
idx += 1
if idx == k:
idx = 0
return answer
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nk = input().split()
n = int(nk[0])
k = int(nk[1])
c = list(map(int, input().rstrip().split()))
minimumCost = getMinimumCost(k, c)
fptr.write(str(minimumCost) + '\n')
fptr.close()
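The loop above is the standard greedy for this problem: sort prices descending and have k buyers take turns, each paying (flowers already bought + 1) times the price. An equivalent compact version with two small checks (a sketch; the sample values are illustrative):

def min_cost(k, c):
    # the i-th most expensive flower goes to buyer i % k at multiplier i // k + 1
    return sum((i // k + 1) * price for i, price in enumerate(sorted(c, reverse=True)))

assert min_cost(3, [2, 5, 6]) == 13   # three buyers, one flower each
assert min_cost(2, [2, 5, 6]) == 15   # one buyer pays double for a second flower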
|
[
"cwj1387@gmail.com"
] |
cwj1387@gmail.com
|
6a745e9f87961ebcdf3c39a8a5a82bb8766d12fd
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v11/resources/types/custom_interest.py
|
99614684ab894f1604193e87090c4c4a6cf627c7
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 4,315
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v11.enums.types import custom_interest_member_type
from google.ads.googleads.v11.enums.types import custom_interest_status
from google.ads.googleads.v11.enums.types import custom_interest_type
__protobuf__ = proto.module(
package="google.ads.googleads.v11.resources",
marshal="google.ads.googleads.v11",
manifest={"CustomInterest", "CustomInterestMember",},
)
class CustomInterest(proto.Message):
r"""A custom interest. This is a list of users by interest.
Attributes:
resource_name (str):
Immutable. The resource name of the custom interest. Custom
interest resource names have the form:
``customers/{customer_id}/customInterests/{custom_interest_id}``
id (int):
Output only. Id of the custom interest.
This field is a member of `oneof`_ ``_id``.
status (google.ads.googleads.v11.enums.types.CustomInterestStatusEnum.CustomInterestStatus):
Status of this custom interest. Indicates
whether the custom interest is enabled or
removed.
name (str):
Name of the custom interest. It should be
unique across the same custom affinity audience.
This field is required for create operations.
This field is a member of `oneof`_ ``_name``.
type_ (google.ads.googleads.v11.enums.types.CustomInterestTypeEnum.CustomInterestType):
Type of the custom interest, CUSTOM_AFFINITY or
CUSTOM_INTENT. By default the type is set to
CUSTOM_AFFINITY.
description (str):
Description of this custom interest audience.
This field is a member of `oneof`_ ``_description``.
members (Sequence[google.ads.googleads.v11.resources.types.CustomInterestMember]):
List of custom interest members that this
custom interest is composed of. Members can be
added during CustomInterest creation. If members
are presented in UPDATE operation, existing
members will be overridden.
"""
resource_name = proto.Field(proto.STRING, number=1,)
id = proto.Field(proto.INT64, number=8, optional=True,)
status = proto.Field(
proto.ENUM,
number=3,
enum=custom_interest_status.CustomInterestStatusEnum.CustomInterestStatus,
)
name = proto.Field(proto.STRING, number=9, optional=True,)
type_ = proto.Field(
proto.ENUM,
number=5,
enum=custom_interest_type.CustomInterestTypeEnum.CustomInterestType,
)
description = proto.Field(proto.STRING, number=10, optional=True,)
members = proto.RepeatedField(
proto.MESSAGE, number=7, message="CustomInterestMember",
)
class CustomInterestMember(proto.Message):
r"""A member of custom interest audience. A member can be a
keyword or url. It is immutable, that is, it can only be created
or removed but not changed.
Attributes:
member_type (google.ads.googleads.v11.enums.types.CustomInterestMemberTypeEnum.CustomInterestMemberType):
The type of custom interest member, KEYWORD
or URL.
parameter (str):
Keyword text when member_type is KEYWORD or URL string when
member_type is URL.
This field is a member of `oneof`_ ``_parameter``.
"""
member_type = proto.Field(
proto.ENUM,
number=1,
enum=custom_interest_member_type.CustomInterestMemberTypeEnum.CustomInterestMemberType,
)
parameter = proto.Field(proto.STRING, number=3, optional=True,)
__all__ = tuple(sorted(__protobuf__.manifest))
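Hypothetical construction of the message defined above (a sketch; assumes the google-ads-python package that ships this module, and the IDs are made up):

from google.ads.googleads.v11.resources.types import custom_interest

ci = custom_interest.CustomInterest(
    resource_name="customers/1234567890/customInterests/111",
    name="Trail running enthusiasts",
    description="Users interested in trail running gear",
)
print(ci.resource_name)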
|
[
"noreply@github.com"
] |
GerhardusM.noreply@github.com
|
3e5d83152601273afe64ccec38a1da8f975d3f69
|
d2cacbd1bde10e464faabc22ad5936f1aaf4e2ef
|
/data/Exp_ICIP/SingleTraining/Standard/chess/main.py
|
fd7f9227b6f2876e357707f2b72f4758b385fefe
|
[] |
no_license
|
npiasco/dl_management
|
a26950a3b53c720d881a8b7ac3fa81161a048256
|
11c29a3637efa5fd223b36664d62c704e8166bab
|
refs/heads/master
| 2021-03-16T05:44:39.806437
| 2019-09-06T13:52:52
| 2019-09-06T13:52:52
| 124,055,338
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,585
|
py
|
#!/usr/bin/env python
import os, sys
import setlog
conf_file = os.environ['DEV'] + 'dl_management/.log/logging.yaml'
save_file = os.path.abspath(sys.argv[0])[:-len(sys.argv[0])] + 'log/'
setlog.reconfigure(conf_file, save_file)
import system.PoseRegression as System
if __name__ == '__main__':
scene = 'chess'
machine = System.MultNet(root=os.path.abspath(sys.argv[0])[:-len(sys.argv[0])],
#trainer_file='../../feat_trainer.yaml',
trainer_file= 'trainer.yaml',
#trainer_file='../../trainer_depth.yaml',
dataset_file = '../../../datasets/' + scene + '.yaml',
#cnn_type='../../cnn.yaml'
cnn_type='../../vladcnn.yaml'
)
action = input('Exec:\n[t]\ttrain\n[e]\ttest\n[p]\tprint (console)\n[P]\tprint (full)\n[ ]\ttrain+test\n')
if action == 't':
machine.train()
elif action == 'e':
machine.test()
machine.plot(print_loss=False, print_val=False)
elif action == 'ef':
machine.test_on_final()
machine.plot(print_loss=False, print_val=False)
elif action == 'p':
machine.plot(print_loss=False, print_val=False)
elif action == 'P':
machine.plot()
elif action == 'm':
machine.map_print(batch_size=1)
elif action == 'mf':
machine.map_print(final=True, batch_size=1)
elif action == 's':
machine.serialize_net()
elif action == 'sf':
machine.serialize_net(final=True)
elif action == 'pose':
machine.view_localization(pas=3)
elif action == 'posef':
machine.view_localization(pas=3, final=True)
elif action == 'model':
machine.creat_model()
elif action == 'modeld':
machine.creat_model(fake_depth=True)
elif action == 'modelt':
machine.creat_model(test=True)
elif action == 'modeldt':
machine.creat_model(test=True, fake_depth=True)
elif action == 'clusters':
machine.creat_clusters(64, size_feat=256, map_feat='conv7')
elif action == 'thresh':
machine.threshold_selection(final=True, dataset='test', load=False, beg=0.0, n_values=2000)
elif action == 'threshl':
machine.threshold_selection(final=True, dataset='test', load=True, beg=0.0, n_values=2000)
elif action == '':
machine.train()
machine.test()
machine.plot(print_loss=False, print_val=False)
else:
raise ValueError('Unknown cmd: {}'.format(action))
|
[
"nathan.piasco@gmail.com"
] |
nathan.piasco@gmail.com
|
5497f6ee6391b9ac43175da5e71e6258fe100482
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2343/60747/290105.py
|
acd1d87a6a3d7f48f8d17e9d3a7187668c2002bd
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
n=input().split(" ")
s=""
for i in range(int(n[0])+int(n[1])):
s=s+input()
if s=="n<><>un<>nnuonuu<>un<><>u<><>o<><>n<><>u<><>n<><>u":print("RLLRLRR")
else:print(s)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
422b6c2b3875c8bc1e6c5e9adb460cba8e8e15e5
|
f303936feb0f221ea1ccb3ef1eae57654aa0325a
|
/server/szurubooru/func/users.py
|
3c39fb5a119d15c7080f29da463b20de76a58417
|
[] |
no_license
|
hnamquoc/szurubooru
|
7b695e232c7f601dc95f77fbb7570aef3e16ddd9
|
16d4d3ca68964eb7759b629ec84eb6b14d9d7cdb
|
refs/heads/master
| 2020-12-31T01:23:26.322422
| 2016-05-21T20:29:31
| 2016-05-21T20:35:18
| 59,414,380
| 1
| 0
| null | 2016-05-22T13:41:37
| 2016-05-22T13:41:36
| null |
UTF-8
|
Python
| false
| false
| 6,152
|
py
|
import datetime
import re
from sqlalchemy import func
from szurubooru import config, db, errors
from szurubooru.func import auth, util, files, images
class UserNotFoundError(errors.NotFoundError): pass
class UserAlreadyExistsError(errors.ValidationError): pass
class InvalidUserNameError(errors.ValidationError): pass
class InvalidEmailError(errors.ValidationError): pass
class InvalidPasswordError(errors.ValidationError): pass
class InvalidRankError(errors.ValidationError): pass
class InvalidAvatarError(errors.ValidationError): pass
def serialize_user(user, authenticated_user, force_show_email=False):
if not user:
return {}
ret = {
'name': user.name,
'rank': user.rank,
'creationTime': user.creation_time,
'lastLoginTime': user.last_login_time,
'avatarStyle': user.avatar_style,
'email': user.email,
}
if user.avatar_style == user.AVATAR_GRAVATAR:
ret['avatarUrl'] = 'http://gravatar.com/avatar/%s?d=retro&s=%d' % (
util.get_md5((user.email or user.name).lower()),
config.config['thumbnails']['avatar_width'])
else:
ret['avatarUrl'] = '%s/avatars/%s.png' % (
config.config['data_url'].rstrip('/'), user.name.lower())
if authenticated_user.user_id != user.user_id \
and not force_show_email \
and not auth.has_privilege(authenticated_user, 'users:edit:any:email'):
del ret['email']
return ret
def serialize_user_with_details(user, authenticated_user, **kwargs):
return {'user': serialize_user(user, authenticated_user, **kwargs)}
def get_user_count():
return db.session.query(db.User).count()
def try_get_user_by_name(name):
return db.session \
.query(db.User) \
.filter(func.lower(db.User.name) == func.lower(name)) \
.one_or_none()
def get_user_by_name(name):
user = try_get_user_by_name(name)
if not user:
raise UserNotFoundError('User %r not found.' % name)
return user
def try_get_user_by_name_or_email(name_or_email):
return db.session \
.query(db.User) \
.filter(
(func.lower(db.User.name) == func.lower(name_or_email))
| (func.lower(db.User.email) == func.lower(name_or_email))) \
.one_or_none()
def get_user_by_name_or_email(name_or_email):
user = try_get_user_by_name_or_email(name_or_email)
if not user:
raise UserNotFoundError('User %r not found.' % name_or_email)
return user
def create_user(name, password, email):
user = db.User()
update_user_name(user, name)
update_user_password(user, password)
update_user_email(user, email)
if get_user_count() > 0:
user.rank = util.flip(auth.RANK_MAP)[config.config['default_rank']]
else:
user.rank = db.User.RANK_ADMINISTRATOR
user.creation_time = datetime.datetime.now()
user.avatar_style = db.User.AVATAR_GRAVATAR
return user
def update_user_name(user, name):
if not name:
raise InvalidUserNameError('Name cannot be empty.')
if util.value_exceeds_column_size(name, db.User.name):
raise InvalidUserNameError('User name is too long.')
other_user = try_get_user_by_name(name)
if other_user and other_user.user_id != user.user_id:
raise UserAlreadyExistsError('User %r already exists.' % name)
name = name.strip()
name_regex = config.config['user_name_regex']
if not re.match(name_regex, name):
raise InvalidUserNameError(
'User name %r must satisfy regex %r.' % (name, name_regex))
user.name = name
def update_user_password(user, password):
if not password:
raise InvalidPasswordError('Password cannot be empty.')
password_regex = config.config['password_regex']
if not re.match(password_regex, password):
raise InvalidPasswordError(
'Password must satisfy regex %r.' % password_regex)
user.password_salt = auth.create_password()
user.password_hash = auth.get_password_hash(user.password_salt, password)
def update_user_email(user, email):
if email:
email = email.strip()
if not email:
email = None
if email and util.value_exceeds_column_size(email, db.User.email):
raise InvalidEmailError('Email is too long.')
if not util.is_valid_email(email):
raise InvalidEmailError('E-mail is invalid.')
user.email = email
def update_user_rank(user, rank, authenticated_user):
if not rank:
raise InvalidRankError('Rank cannot be empty.')
rank = util.flip(auth.RANK_MAP).get(rank.strip(), None)
all_ranks = list(auth.RANK_MAP.values())
if not rank:
raise InvalidRankError(
'Rank can be either of %r.' % all_ranks)
if rank in (db.User.RANK_ANONYMOUS, db.User.RANK_NOBODY):
raise InvalidRankError('Rank %r cannot be used.' % auth.RANK_MAP[rank])
if all_ranks.index(authenticated_user.rank) \
< all_ranks.index(rank) and get_user_count() > 0:
raise errors.AuthError('Trying to set higher rank than your own.')
user.rank = rank
def update_user_avatar(user, avatar_style, avatar_content):
if avatar_style == 'gravatar':
user.avatar_style = user.AVATAR_GRAVATAR
elif avatar_style == 'manual':
user.avatar_style = user.AVATAR_MANUAL
if not avatar_content:
raise InvalidAvatarError('Avatar content missing.')
image = images.Image(avatar_content)
image.resize_fill(
int(config.config['thumbnails']['avatar_width']),
int(config.config['thumbnails']['avatar_height']))
files.save('avatars/' + user.name.lower() + '.png', image.to_png())
else:
raise InvalidAvatarError(
'Avatar style %r is invalid. Valid avatar styles: %r.' % (
avatar_style, ['gravatar', 'manual']))
def bump_user_login_time(user):
user.last_login_time = datetime.datetime.now()
def reset_user_password(user):
password = auth.create_password()
user.password_salt = auth.create_password()
user.password_hash = auth.get_password_hash(user.password_salt, password)
return password
|
[
"rr-@sakuya.pl"
] |
rr-@sakuya.pl
|
f4634675d8b38cab07ad7568cd4c1eb03d5df4c7
|
bddc40a97f92fafb8cbbbfdbdfe6774996578bb0
|
/exercicioLista01/ex09.py
|
3d885e7b04164115b80e89e056de00459bd1665a
|
[] |
no_license
|
andrehmiguel/treinamento
|
8f83041bd51387dd3e5cafed09c4bb0a08d0e375
|
ed18e6a8cfba0baaa68757c12893c62a0938a67e
|
refs/heads/main
| 2023-01-31T13:15:58.113392
| 2020-12-16T02:47:44
| 2020-12-16T02:47:44
| 317,631,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
#09 Write a program that asks for a temperature in degrees Fahrenheit, converts it, and displays the temperature in degrees Celsius.
#C = 5 * ((F-32) / 9).
F = int(input("Enter the temperature in F: "))
C = 5 * ((F-32) / 9)
print(F, "degrees Fahrenheit equal", C, "degrees Celsius.")
|
[
"andrehmiguel@outlook.com"
] |
andrehmiguel@outlook.com
|
7bcb3f7715b24df699c20989fad420f6b3ed7bb7
|
930a868ae9bbf85df151b3f54d04df3a56bcb840
|
/benchmark/slurm_utilities/slurm_rerun_failed.py
|
e949b3379c738b6570226a175e386621067b1975
|
[
"MIT"
] |
permissive
|
yuewuo/QEC-Playground
|
1148f3c5f4035c069986d8b4103acf7f1e34f9d4
|
462208458cdf9dc8a33d4553a560f8a16c00e559
|
refs/heads/main
| 2023-08-10T13:05:36.617858
| 2023-07-22T23:48:49
| 2023-07-22T23:48:49
| 312,809,760
| 16
| 1
|
MIT
| 2023-07-22T23:48:51
| 2020-11-14T12:10:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
import os, sys, subprocess, time
import slurm_distribute
def rerun_failed(sbatch_file_path, failed_cases, slurm_commands_vec=None, use_interactive_partition=False):
# generate rerun sbatch file
sbatch_file_folder = os.path.dirname(sbatch_file_path)
rerun_file_path = os.path.join(sbatch_file_folder, "rerun-" + os.path.basename(sbatch_file_path))
with open(sbatch_file_path, "r", encoding="utf8") as f:
lines = f.readlines()
with open(rerun_file_path, "w", encoding="utf8") as f:
for line in lines:
if line.startswith("#SBATCH --array="):
f.write(f"#SBATCH --array={','.join([str(e) for e in failed_cases])}\n")
else:
f.write(line)
print("rerun_file_path", rerun_file_path)
slurm_distribute.slurm_run_sbatch_wait(rerun_file_path, failed_cases, original_sbatch_file_path=sbatch_file_path, slurm_commands_vec=slurm_commands_vec, use_interactive_partition=use_interactive_partition)
if __name__ == "__main__":
if len(sys.argv) != 3:
print("usage: <sbatch_file_path> <failed_cases: comma separated>")
exit(-1)
sbatch_file_path = os.path.abspath(sys.argv[1])
failed_cases = [int(e) for e in sys.argv[2].split(",")]
rerun_failed(sbatch_file_path, failed_cases)
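Example invocation implied by the usage string above (the file name and case numbers are made up):

# python3 slurm_rerun_failed.py ./benchmark.sbatch 12,37,41
# This rewrites the "#SBATCH --array=..." line to list only the failed case
# indices, writes the result as rerun-benchmark.sbatch next to the original,
# and resubmits it via slurm_distribute.slurm_run_sbatch_wait.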
|
[
"yue.wu@yale.edu"
] |
yue.wu@yale.edu
|
8e633d8ebb598671323b8487afebce2f6f963568
|
26771494974942f4ab18d2cd8247506c344e1d14
|
/1-50/003-v2-longestSubstringWithoutRepeatingCharacters.py
|
1b623b33716c725631f7c4fd8f5ea735357865ac
|
[] |
no_license
|
wangyunpengbio/LeetCode
|
9f4c6076e067c5e847d662679483f737d40e8ca5
|
cec1fd11fe43177abb2d4236782c0f116e6e8bce
|
refs/heads/master
| 2020-04-29T22:28:25.899420
| 2020-04-03T07:37:26
| 2020-04-03T07:37:26
| 176,448,957
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
# Sliding-window approach from the reference solution, sliding with two pointers
n = len(s)
myset = set()
ans = 0
i = 0
j = 0
while i < n and j < n:
if s[j] not in myset:
myset.add(s[j])
j = j + 1
ans = max(ans,j-i)
else:
myset.remove(s[i])
i = i + 1
return ans
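Usage sketch for the class above, against the classic LeetCode examples:

assert Solution().lengthOfLongestSubstring("abcabcbb") == 3  # "abc"
assert Solution().lengthOfLongestSubstring("bbbbb") == 1     # "b"
assert Solution().lengthOfLongestSubstring("pwwkew") == 3    # "wke"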
|
[
"wangyunpeng_bio@qq.com"
] |
wangyunpeng_bio@qq.com
|
5f23a4262ec4073b1f163b28f7c67f2d5e26d020
|
5a545262f7c053c1cfd1f7984664e3220c745161
|
/casper4/griefing_factor_calculator.py
|
b51af758356363f00bf152f26b683100969d0483
|
[
"MIT"
] |
permissive
|
ethereum/research
|
2c523e5796cfdb6055e0107dc1768fbf164ecad0
|
bb873f8ad0e673803ec6a55be26678e1f99b9ece
|
refs/heads/master
| 2023-09-04T19:11:51.507361
| 2023-08-30T01:52:05
| 2023-08-30T01:52:05
| 42,808,596
| 1,774
| 603
|
MIT
| 2023-04-21T07:20:21
| 2015-09-20T10:13:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,335
|
py
|
# Computes griefing factors of various parameter sets for Casper the
# Friendly Finality Gadget
# Case 1: <1/3 non-commit (optimal if epsilon participate)
def gf1(x1, x2, x3, x4, x5):
return x2 / x1
# Case 2: censor <1/3 committers (optimal if 1/3 get censored)
def gf2(x1, x2, x3, x4, x5):
return 1.5 * (x1 + x2 / 3) / x2
# Generalized case 2
#k = 0.25
#def gf2(x1, x2, x3, x4, x5):
# return (x1 * k + x2 * k**2) / (x2 * k * (1-k))
# Case 3: <1/3 non-prepare (optimal if epsilon participate)
def gf3(x1, x2, x3, x4, x5):
return x4 / x3
# Case 4: censor <1/3 preparers (optimal if 1/3 get censored)
def gf4(x1, x2, x3, x4, x5):
return 1.5 * (x3 + x4 / 3) / x4
# Case 5: finality preventing 1/3 non-commits
def gf5(x1, x2, x3, x4, x5):
return 2 * (x5 + x2 / 3) / (x5 + x1 + x2 / 3)
# Case 6: censor commits
def gf6(x1, x2, x3, x4, x5):
# Case 6a: 51% participate
return max(1 + x2 / (x5 + x1 + x2 / 2),
# Case 6b: 67% participate
(x5 + x1 + x2 / 3) / (x5 + x2 / 3) / 2)
# Case 7: finality and commit-preventing 1/3 non-prepares
def gf7(x1, x2, x3, x4, x5):
return 2 * (x5 + x4 / 3) / (x5 + x3 + x4 / 3)
gfs = (gf1, gf2, gf3, gf4, gf5, gf6, gf7)
# Get the maximum griefing factor of a set of parameters
def getmax(*args):
return max([f(*args) for f in gfs])
# Get the maximum <50% griefing factor, and enforce a bound
# of MAX_CENSOR_GF on the griefing factor of >50% coalitions
def getmax2(*args):
MAX_CENSOR_GF = 2
if gf2(*args) > MAX_CENSOR_GF or gf4(*args) > MAX_CENSOR_GF or \
gf6(*args) > MAX_CENSOR_GF:
return 999999999999999999
return max(gf1(*args), gf3(*args), gf5(*args), gf7(*args))
# Range to test for each parameter
my_range = [i/12. for i in range(1, 61)]
best_vals = (1, 0, 0, 0, 0)
best_score = 999999999999999999
# print([f(5, 6, 5, 6, 0) for f in gfs])
for x1 in my_range:
for x2 in my_range:
for x3 in my_range:
for x4 in my_range:
o = getmax2(x1, x2, x3, x4, 1)
if o < best_score:
best_score = o
best_vals = (x1, x2, x3, x4, 1)
if o <= 1:
print((x1, x2, x3, x4, 1), [f(x1, x2, x3, x4, 1) for f in gfs])
print('result', best_vals, best_score)
print([f(*best_vals) for f in gfs])
|
[
"v@buterin.com"
] |
v@buterin.com
|
826e890c5538a5e47ee9b6d19b96e2365eb6aab2
|
05caf48bd067c050666026b75686f23d02327378
|
/_560.py
|
de64e26467444d25f8d5aaf3e39947d672b14bd7
|
[
"MIT"
] |
permissive
|
elfgzp/Leetcode
|
3b6fa307c699fd5a1ba5ea88988c324c33a83eb7
|
964c6574d310a9a6c486bf638487fd2f72b83b3f
|
refs/heads/master
| 2023-08-21T23:11:38.265884
| 2020-10-17T11:55:45
| 2020-10-17T11:55:45
| 168,635,331
| 3
| 0
|
MIT
| 2023-07-21T03:50:43
| 2019-02-01T03:14:49
|
Python
|
UTF-8
|
Python
| false
| false
| 488
|
py
|
class Solution:
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
res = 0
pre_sum = 0
dic = {0: 1}
for n in nums:
pre_sum += n
if pre_sum - k in dic:
res += dic[pre_sum - k]
dic[pre_sum] = dic.get(pre_sum, 0) + 1
return res
if __name__ == '__main__':
s = Solution()
print(s.subarraySum([1, 1, 1], 2))
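A worked trace of the prefix-sum dictionary for the call above (illustration):

# nums = [1, 1, 1], k = 2, dic starts as {0: 1}
# n=1: pre_sum=1, 1-2=-1 not in dic           -> dic={0:1, 1:1}
# n=1: pre_sum=2, 2-2=0  in dic with count 1  -> res=1, dic={0:1, 1:1, 2:1}
# n=1: pre_sum=3, 3-2=1  in dic with count 1  -> res=2
# prints 2: the subarrays [1, 1] at indices 0..1 and 1..2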
|
[
"741424975@qq.com"
] |
741424975@qq.com
|
0f4025b60d2f552b2859125fbcd22ff802197eb0
|
a882ccf759025735f926695d6a5a39937854646a
|
/c_step16/conf.py
|
02c981773f5192a24679711db370473f365f18be
|
[] |
no_license
|
muzudho/practice-open-cv2
|
5c1534564bcf43c2d8f7a6fb4ee1583bd77337f9
|
55af5cfb37587b08123b404cf8768d83148cb046
|
refs/heads/main
| 2023-07-08T02:23:22.984816
| 2021-08-10T10:45:01
| 2021-08-10T10:45:01
| 349,864,518
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""設定
* 横幅は 450px 程度以下
* crieitブログに貼りつけるとき、横幅が広すぎると圧縮されて gifアニメ ではなくなってしまう
* ファイルサイズは 2MB 以下
* crieitブログの画像貼付け制限
"""
# グリッド間隔
GRID_UNIT = 16
# 色相環一周分のコマ数
PHASE_COUNTS = 24
# フォント倍率
FONT_SCALE = 0.5
|
[
"muzudho1@gmail.com"
] |
muzudho1@gmail.com
|
803e0e4dcc3f1532c1b2fb227753c3c4ba7c6bde
|
a2dc75a80398dee58c49fa00759ac99cfefeea36
|
/bluebottle/cms/migrations/0033_auto_20171017_1353.py
|
b08c6860a74195c4ea8fe4b46ee081f05535c972
|
[
"BSD-2-Clause"
] |
permissive
|
onepercentclub/bluebottle
|
e38b0df2218772adf9febb8c6e25a2937889acc0
|
2b5f3562584137c8c9f5392265db1ab8ee8acf75
|
refs/heads/master
| 2023-08-29T14:01:50.565314
| 2023-08-24T11:18:58
| 2023-08-24T11:18:58
| 13,149,527
| 15
| 9
|
BSD-3-Clause
| 2023-09-13T10:46:20
| 2013-09-27T12:09:13
|
Python
|
UTF-8
|
Python
| false
| false
| 690
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-17 11:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.manager
class Migration(migrations.Migration):
dependencies = [
('cms', '0032_migrate_projects_3'),
]
operations = [
migrations.AddField(
model_name='slidescontent',
name='sub_title',
field=models.CharField(blank=True, max_length=70, null=True),
),
migrations.AddField(
model_name='slidescontent',
name='title',
field=models.CharField(blank=True, max_length=40, null=True),
),
]
|
[
"ernst@onepercentclub.com"
] |
ernst@onepercentclub.com
|
5d375d43bc7c4bc5917a3045e557f480db9b73f0
|
7298d1692c6948f0880e550d6100c63a64ce3ea1
|
/catalog-configs/Vocab/ihm_modeling_post_process_feature_term.py
|
850ac847e5be209c3b038500354e741d8f1015e4
|
[] |
no_license
|
informatics-isi-edu/protein-database
|
b7684b3d08dbf22c1e7c4a4b8460248c6f0d2c6d
|
ce4be1bf13e6b1c22f3fccbb513824782609991f
|
refs/heads/master
| 2023-08-16T10:24:10.206574
| 2023-07-25T23:10:42
| 2023-07-25T23:10:42
| 174,095,941
| 2
| 0
| null | 2023-06-16T19:44:43
| 2019-03-06T07:39:14
|
Python
|
UTF-8
|
Python
| false
| false
| 6,143
|
py
|
import argparse
from attrdict import AttrDict
from deriva.core import ErmrestCatalog, get_credential, DerivaPathError
from deriva.utils.catalog.components.deriva_model import DerivaCatalog
import deriva.core.ermrest_model as em
from deriva.core.ermrest_config import tag as chaise_tags
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
groups = {
'pdb-admin': 'https://auth.globus.org/0b98092c-3c41-11e9-a8c8-0ee7d80087ee',
'pdb-reader': 'https://auth.globus.org/8875a770-3c40-11e9-a8c8-0ee7d80087ee',
'pdb-writer': 'https://auth.globus.org/c94a1e5c-3c40-11e9-a5d1-0aacc65bfe9a',
'pdb-curator': 'https://auth.globus.org/eef3e02a-3c40-11e9-9276-0edc9bdd56a6',
'isrd-staff': 'https://auth.globus.org/176baec4-ed26-11e5-8e88-22000ab4b42b'
}
table_name = 'ihm_modeling_post_process_feature_term'
schema_name = 'Vocab'
column_annotations = {
'RCT': {
chaise_tags.display: {
'name': 'Creation Time'
},
chaise_tags.generated: None,
chaise_tags.immutable: None
},
'RMT': {
chaise_tags.display: {
'name': 'Last Modified Time'
},
chaise_tags.generated: None,
chaise_tags.immutable: None
},
'RCB': {
chaise_tags.display: {
'name': 'Created By'
},
chaise_tags.generated: None,
chaise_tags.immutable: None
},
'RMB': {
chaise_tags.display: {
'name': 'Modified By'
},
chaise_tags.generated: None,
chaise_tags.immutable: None
},
'ID': {},
'URI': {},
'Name': {},
'Description': {},
'Synonyms': {},
'Owner': {}
}
column_comment = {
'ID': 'The preferred Compact URI (CURIE) for this term.',
'URI': 'The preferred URI for this term.',
'Name': 'The preferred human-readable name for this term.',
'Description': 'A longer human-readable description of this term.',
'Synonyms': 'Alternate human-readable names for this term.',
'Owner': 'Group that can update the record.'
}
column_acls = {}
column_acl_bindings = {}
column_defs = [
em.Column.define(
'ID',
em.builtin_types['ermrest_curie'],
nullok=False,
default='PDB:{RID}',
comment=column_comment['ID'],
),
em.Column.define(
'URI',
em.builtin_types['ermrest_uri'],
nullok=False,
default='/id/{RID}',
comment=column_comment['URI'],
),
em.Column.define(
'Name', em.builtin_types['text'], nullok=False, comment=column_comment['Name'],
),
em.Column.define(
'Description',
em.builtin_types['markdown'],
nullok=False,
comment=column_comment['Description'],
),
em.Column.define('Synonyms', em.builtin_types['text[]'], comment=column_comment['Synonyms'],
),
em.Column.define('Owner', em.builtin_types['text'], comment=column_comment['Owner'],
),
]
visible_columns = {
'*': [
'RID', 'Name', 'Description', 'ID', 'URI',
['Vocab', 'ihm_modeling_post_process_feature_term_RCB_fkey'],
['Vocab', 'ihm_modeling_post_process_feature_term_RMB_fkey'], 'RCT', 'RMT',
['Vocab', 'ihm_modeling_post_process_feature_term_Owner_fkey']
]
}
table_display = {'row_name': {'row_markdown_pattern': '{{{Name}}}'}}
table_annotations = {
chaise_tags.table_display: table_display,
chaise_tags.visible_columns: visible_columns,
}
table_comment = 'A set of controlled vocabulary terms.'
table_acls = {}
table_acl_bindings = {
'self_service_group': {
'types': ['update', 'delete'],
'scope_acl': ['*'],
'projection': ['Owner'],
'projection_type': 'acl'
},
'self_service_creator': {
'types': ['update', 'delete'],
'scope_acl': ['*'],
'projection': ['RCB'],
'projection_type': 'acl'
}
}
key_defs = [
em.Key.define(
['RID'], constraint_names=[('Vocab', 'ihm_modeling_post_process_feature_term_RIDkey1')],
),
em.Key.define(
['ID'], constraint_names=[('Vocab', 'ihm_modeling_post_process_feature_term_IDkey1')],
),
em.Key.define(
['URI'], constraint_names=[('Vocab', 'ihm_modeling_post_process_feature_term_URIkey1')],
),
]
fkey_defs = [
em.ForeignKey.define(
['Owner'],
'public',
'Catalog_Group', ['ID'],
constraint_names=[('Vocab', 'ihm_modeling_post_process_feature_term_Owner_fkey')],
acls={
'insert': [groups['pdb-curator']],
'update': [groups['pdb-curator']]
},
acl_bindings={
'set_owner': {
'types': ['update', 'insert'],
'scope_acl': ['*'],
'projection': ['ID'],
'projection_type': 'acl'
}
},
),
em.ForeignKey.define(
['RCB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[('Vocab', 'ihm_modeling_post_process_feature_term_RCB_fkey')],
acls={
'insert': ['*'],
'update': ['*']
},
),
em.ForeignKey.define(
['RMB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[('Vocab', 'ihm_modeling_post_process_feature_term_RMB_fkey')],
acls={
'insert': ['*'],
'update': ['*']
},
),
]
table_def = em.Table.define(
table_name,
column_defs=column_defs,
key_defs=key_defs,
fkey_defs=fkey_defs,
annotations=table_annotations,
acls=table_acls,
acl_bindings=table_acl_bindings,
comment=table_comment,
provide_system=True
)
def main(catalog, mode, replace=False, really=False):
updater = CatalogUpdater(catalog)
updater.update_table(mode, schema_name, table_def, replace=replace, really=really)
if __name__ == "__main__":
host = 'pdb.isrd.isi.edu'
catalog_id = 5
mode, replace, host, catalog_id = parse_args(host, catalog_id, is_table=True)
catalog = DerivaCatalog(host, catalog_id=catalog_id, validate=False)
main(catalog, mode, replace)
|
[
"carl@isi.edu"
] |
carl@isi.edu
|
1469b0ca35fbff5011c461f263785d99282f79f6
|
7a2125b1b4712142e7e1cce21f5ffcb14a6033bc
|
/shh/__main__.py
|
664ce9e256b504b70eef92b0c4cac7556be758f5
|
[] |
no_license
|
keenhenry/shh
|
060127f22bfe37ce7c2f391070184e646e9c82b7
|
f4d95dd5341df74195197d8527a4a4e5b0f548b0
|
refs/heads/master
| 2021-01-17T19:57:01.449859
| 2016-08-07T16:14:25
| 2016-08-07T16:14:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
import shh
import time
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=None, type=int)
parser.add_argument('-k', '--key', default=None, type=str)
parser.add_argument('-s', '--server', action='store_true')
args = parser.parse_args()
if args.port is None:
port = shh.utils.find_port()
else:
port = args.port
print('Local port: {}'.format(port))
print('Creating hidden service...')
hidden = shh.HiddenService(port, key_file=args.key)
print('Serving at: ' + hidden.onion)
if args.server:
try:
from socketserver import TCPServer
except ImportError:
from SocketServer import TCPServer
try:
from http.server import SimpleHTTPRequestHandler
except ImportError:
from SimpleHTTPServer import SimpleHTTPRequestHandler
print('Serving current directory')
server = TCPServer(('', port), SimpleHTTPRequestHandler)
server.serve_forever()
else:
while True:
time.sleep(1)
|
[
"davy.wybiral@gmail.com"
] |
davy.wybiral@gmail.com
|
3a14bf609ba29095c5139eff1ced4c4fe38640f0
|
9398d8433fdb29ee630a6ee43a07bc36a2adbd88
|
/neutronclient/neutron/v2_0/fw/firewallrule.py
|
e77e96facdb91dac0a91fec5398a6d4c81fa0b36
|
[] |
no_license
|
bopopescu/OpenStack_Liberty_Control
|
ca5a21d0c32c55dc8c517f5c7c9938ce575a4888
|
0f6ec1b4d38c47776fdf8935266bcaef2464af4c
|
refs/heads/master
| 2022-12-03T10:41:53.210667
| 2016-03-29T06:25:58
| 2016-03-29T06:25:58
| 282,089,815
| 0
| 0
| null | 2020-07-24T01:04:15
| 2020-07-24T01:04:14
| null |
UTF-8
|
Python
| false
| false
| 5,437
|
py
|
# Copyright 2013 Big Switch Networks
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
from neutronclient._i18n import _
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronv20
class ListFirewallRule(neutronv20.ListCommand):
"""List firewall rules that belong to a given tenant."""
resource = 'firewall_rule'
list_columns = ['id', 'name', 'firewall_policy_id', 'summary', 'enabled']
pagination_support = True
sorting_support = True
def extend_list(self, data, parsed_args):
for d in data:
val = []
if d.get('protocol'):
protocol = d['protocol'].upper()
else:
protocol = 'no-protocol'
val.append(protocol)
if 'source_ip_address' in d and 'source_port' in d:
src = 'source: ' + str(d['source_ip_address']).lower()
src = src + '(' + str(d['source_port']).lower() + ')'
else:
src = 'source: none specified'
val.append(src)
if 'destination_ip_address' in d and 'destination_port' in d:
dst = 'dest: ' + str(d['destination_ip_address']).lower()
dst = dst + '(' + str(d['destination_port']).lower() + ')'
else:
dst = 'dest: none specified'
val.append(dst)
if 'action' in d:
action = d['action']
else:
action = 'no-action'
val.append(action)
d['summary'] = ',\n '.join(val)
class ShowFirewallRule(neutronv20.ShowCommand):
"""Show information of a given firewall rule."""
resource = 'firewall_rule'
class CreateFirewallRule(neutronv20.CreateCommand):
"""Create a firewall rule."""
resource = 'firewall_rule'
def add_known_arguments(self, parser):
parser.add_argument(
'--name',
help=_('Name for the firewall rule.'))
parser.add_argument(
'--description',
help=_('Description for the firewall rule.'))
parser.add_argument(
'--shared',
dest='shared',
action='store_true',
help=_('Set shared to True (default is False).'),
default=argparse.SUPPRESS)
parser.add_argument(
'--ip-version',
type=int, choices=[4, 6], default=4,
help=_('IP version for the firewall rule (default is 4).'))
parser.add_argument(
'--source-ip-address',
help=_('Source IP address or subnet.'))
parser.add_argument(
'--destination-ip-address',
help=_('Destination IP address or subnet.'))
parser.add_argument(
'--source-port',
help=_('Source port (integer in [1, 65535] or range in a:b).'))
parser.add_argument(
'--destination-port',
help=_('Destination port (integer in [1, 65535] or range in '
'a:b).'))
utils.add_boolean_argument(
parser, '--enabled', dest='enabled',
help=_('Whether to enable or disable this rule.'))
parser.add_argument(
'--protocol', choices=['tcp', 'udp', 'icmp', 'any'],
required=True,
help=_('Protocol for the firewall rule.'))
parser.add_argument(
'--action',
required=True,
choices=['allow', 'deny', 'reject'],
help=_('Action for the firewall rule.'))
def args2body(self, parsed_args):
body = {}
neutronv20.update_dict(parsed_args, body,
['name', 'description', 'shared', 'protocol',
'source_ip_address', 'destination_ip_address',
'source_port', 'destination_port',
'action', 'enabled', 'tenant_id',
'ip_version'])
protocol = parsed_args.protocol
if protocol == 'any':
protocol = None
body['protocol'] = protocol
return {self.resource: body}
class UpdateFirewallRule(neutronv20.UpdateCommand):
"""Update a given firewall rule."""
resource = 'firewall_rule'
def add_known_arguments(self, parser):
parser.add_argument(
'--protocol', choices=['tcp', 'udp', 'icmp', 'any'],
required=False,
help=_('Protocol for the firewall rule.'))
def args2body(self, parsed_args):
body = {}
protocol = parsed_args.protocol
if protocol:
if protocol == 'any':
protocol = None
body['protocol'] = protocol
return {self.resource: body}
class DeleteFirewallRule(neutronv20.DeleteCommand):
"""Delete a given firewall rule."""
resource = 'firewall_rule'
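A hypothetical CLI invocation exercising CreateFirewallRule's arguments (illustrative only; the hyphenated command name follows the neutron CLI's usual resource naming):

# neutron firewall-rule-create --protocol tcp --action allow \
#     --destination-ip-address 10.0.0.0/24 --destination-port 80 --name web-allow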
|
[
"tony.pig@gmail.com"
] |
tony.pig@gmail.com
|
0a7fe6879f0410d3164ee9629b9d1a10ae90c8b7
|
9c87f4c9956ccf1ca2e9f75916fad54d7cafa336
|
/harbor/db/hmysql.py
|
38b9652bc238b6ac067aa84b7e3863d886a68554
|
[] |
no_license
|
zbcbcbc/harbor
|
21cbaf1dd5c12d2ca5f56ddaa62355d3a3226f52
|
79d65b2b24dffafd425e423dc54c4810497a7613
|
refs/heads/master
| 2020-05-24T15:24:13.274030
| 2016-09-04T18:17:26
| 2016-09-04T18:17:26
| 67,360,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
#coding: utf-8
__filename__ = "h_db.py"
__description__ = "harbor project database wrapper module"
__author__ = "Bicheng Zhang"
__copyright__ = "Copyright 2012-2013, The Harbor Project"
__credits__ = "Bicheng Zhang"
__email__ = "viczhang1990@gmail.com"
__version__ = "0.6"
__status__ = "Development"
from twisted.enterprise import adbapi
from twisted.python import log
from txredis.protocol import Redis
from twisted.internet.protocol import ClientCreator
from zope.interface import Interface, Attribute, implements
import MySQLdb  # referenced by ReconnectingConnectionPool below
DB_DRIVER = "MySQLdb"
DB_ARGS = {
'db':'harbor',
'user':'root',
'passwd':'NPC8803zbc'
}
class IHarborDB(Interface):
"""
"""
def query(q):
"""
"""
class HarborDB(object):
def __init__(self):
self.dbpool = adbapi.ConnectionPool(DB_DRIVER, **DB_ARGS)
def query(self, query):
"""
"""
pass
class ReconnectingConnectionPool(adbapi.ConnectionPool):
    """Reconnecting adbapi connection pool for MySQL.
    see
    https://twistedmatrix.com/pipermail/twisted-python/2009-July/0200007.html
    """
    def _runInteraction(self, interaction, *args, **kw):
        try:
            return adbapi.ConnectionPool._runInteraction(self, interaction,
                                                         *args, **kw)
        except MySQLdb.OperationalError, e:
            # 2006: MySQL server has gone away; 2013: lost connection
            if e[0] not in (2006, 2013):
                raise
            log.msg("RPC: got error %s, retrying operation" % (e))
            conn = self.connections.get(self.threadID())
            self.disconnect(conn)
            # try the interaction again on a fresh connection
            return adbapi.ConnectionPool._runInteraction(self, interaction,
                                                         *args, **kw)
|
[
"viczhang1990@gmail.com"
] |
viczhang1990@gmail.com
|
224c6c23b0e05f14161b6e1183aab61d954fa5b5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03815/s360092462.py
|
f1693763337efdfcac142b7361358d714a452a96
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
X = int(input())
cnt = X // 11
X -= 11 * cnt
cnt *= 2
sum = 0
y = 5 if cnt % 2 == 1 else 6
while sum < X:
sum += y
y = 6 if y == 5 else 5
cnt += 1
print(cnt)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c39006b1275d87024fe9d99fbb246ea4b6a57844
|
e6476f18faef8210189c5bc6097a0a108265173c
|
/quadpy/nsimplex/walkington.py
|
457a6c5bce2e410fcb09d242cb2da974b6f23349
|
[
"MIT"
] |
permissive
|
acimpoeru/quadpy
|
4d96ed6fc20fd53148508f8a4a9b657a5d30269d
|
0261efd68e4094af31ee7a82c8099f0d88846d5a
|
refs/heads/master
| 2021-04-12T12:15:22.899532
| 2018-02-14T15:59:27
| 2018-02-14T15:59:27
| 126,213,721
| 0
| 1
|
MIT
| 2018-03-21T17:07:29
| 2018-03-21T17:07:27
| null |
UTF-8
|
Python
| false
| false
| 4,440
|
py
|
# -*- coding: utf-8 -*-
#
from __future__ import division
from math import factorial
import numpy
import sympy
from ..helpers import untangle
class Walkington(object):
'''
Noel J. Walkington,
Quadrature on simplices of arbitrary dimension,
Technical Report,
CMU, 2000,
<http://www.math.cmu.edu/~nw0z/publications/00-CNA-023/023abs/>.
'''
def __init__(self, d, index, symbolic=False):
frac = sympy.Rational if symbolic else lambda x, y: x/y
sqrt = numpy.vectorize(sympy.sqrt) if symbolic else numpy.sqrt
self.name = 'Walkington({})'.format(index)
self.dim = d
if index == 1:
self.degree = 1
data = [(frac(1, factorial(d)), _c(d, frac))]
elif index == 2:
# The article claims order 2, but tests really only show order 1.
# Also, the article says:
#
# > The points are inside the simplex when the positive square root
# > is selected.
#
# Not sure what this means, but for d>=2, the points are outside
# the simplex.
self.degree = 1
data = [
(frac(1, factorial(d+1)), _xi1(d, 1/sqrt(d+1)))
]
elif index == 3:
self.degree = 3
data = [
(frac(-(d+1)**3, 4 * factorial(d+2)), _c(d, frac)),
(frac(+(d+3)**3, 4 * factorial(d+3)), _xi1(d, frac(1, (d+3)))),
]
elif index == 5:
self.degree = 5
w0 = frac(+(d+1)**5, 32 * factorial(d+3))
w1 = frac(-(d+3)**5, 16 * factorial(d+4))
w2 = frac(+(d+5)**5, 16 * factorial(d+5))
data = [
(w0, _c(d, frac)),
(w1, _xi1(d, frac(1, d+3))),
(w2, _xi1(d, frac(1, d+5))),
(w2, _xi11(d, frac(1, d+5), frac)),
]
else:
assert index == 7
self.degree = 7
w0 = -frac(1, 384) * frac((d+1)**7, factorial(d+4))
w1 = +frac(1, 128) * frac((d+3)**7, factorial(d+5))
w2 = -frac(1, 64) * frac((d+5)**7, factorial(d+6))
w3 = +frac(1, 64) * frac((d+7)**7, factorial(d+7))
data = [
(w0, _c(d, frac)),
(w1, _xi1(d, frac(1, d+3))),
(w2, _xi1(d, frac(1, d+5))),
(w2, _xi11(d, frac(1, d+5), frac)),
(w3, _xi1(d, frac(1, d+7))),
(w3, _xi21(d, frac(1, d+7), frac)),
(w3, _xi111(d, frac(1, d+7), frac)),
]
self.bary, self.weights = untangle(data)
self.points = self.bary[:, 1:]
# normalize weights
self.weights /= numpy.sum(self.weights)
return
def _c(d, frac):
return numpy.array([
numpy.full(d+1, frac(1, d+1))
])
def _xi1(d, a):
out = numpy.full((d+1, d+1), a)
b = 1 - d*a
numpy.fill_diagonal(out, b)
return out
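# Illustration (not from the article): for d=2 and a=sympy.Rational(1, 5),
# _xi1 returns the three barycentric points
#     [[3/5, 1/5, 1/5], [1/5, 3/5, 1/5], [1/5, 1/5, 3/5]]
# since b = 1 - d*a = 3/5; each row sums to 1, as barycentric coordinates must.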
def _xi11(d, a, frac):
assert d > 1
b = frac(1 - (d-1) * a, 2)
if d == 2:
out = numpy.array([
[b, b, a],
[b, a, b],
[a, b, b],
])
else:
assert d == 3
out = numpy.array([
[b, b, a, a],
[b, a, b, a],
[b, a, a, b],
[a, b, a, b],
[a, a, b, b],
[a, b, b, a],
])
return out
def _xi21(d, a, frac):
assert d > 1
b = frac(1 - (d-2) * a, 3)
# ERR Note that the article wrongly states (d-2) in the expression for c.
c = 1 - (d-1) * a - b
if d == 2:
out = numpy.array([
[b, c, a],
[c, b, a],
[c, a, b],
[b, a, c],
[a, b, c],
[a, c, b],
])
else:
assert d == 3
out = numpy.array([
[b, c, a, a],
[b, a, c, a],
[b, a, a, c],
[a, b, a, c],
[a, a, b, c],
[a, b, c, a],
[c, b, a, a],
[c, a, b, a],
[c, a, a, b],
[a, c, a, b],
[a, a, c, b],
[a, c, b, a],
])
return out
def _xi111(d, a, frac):
assert d == 3
b = frac(1 - (d-2) * a, 3)
out = numpy.array([
[b, b, b, a],
[b, b, a, b],
[b, a, b, b],
[a, b, b, b],
])
return out
|
[
"nico.schloemer@gmail.com"
] |
nico.schloemer@gmail.com
|
7b2fd7df1cb13035e43f24f46fef589ad5e91ab3
|
9d1238fb0e4a395d49a7b8ff745f21476c9d9c00
|
/framework/Tests/PAS/PAS/GeneralSecrets/SecretsV2Folders/MemberPermissions/API/test_member_add_single_folder_then_append_multilevel_folder_to_it.py
|
46fe3f13b3d24f03c4ad6e4479b9cd1613e66fe3
|
[] |
no_license
|
jaspalsingh92/TestAutomation-1
|
a48ee1d3b73386f1bf8f53328a5b55444238e054
|
e631c67255b10f150e0012991fb1474ede904417
|
refs/heads/master
| 2023-04-18T14:52:08.836221
| 2021-04-07T12:01:07
| 2021-04-07T12:01:07
| 357,175,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,848
|
py
|
import pytest
import logging
from Shared.API.secret import create_folder, get_folder, get_secrets_and_folders_in_folders,\
give_user_permissions_to_folder
from Shared.API.sets import SetsManager
logger = logging.getLogger('test')
@pytest.mark.api
@pytest.mark.pas
@pytest.mark.pasapi
@pytest.mark.bhavna
def test_member_add_single_folder_then_append_multilevel_folder_to_it(core_session,
pas_general_secrets,
cleanup_secrets_and_folders,
users_and_roles,
create_secret_folder):
"""
    C3059: test method to add a single folder, then append a multilevel folder to it
1) create multilevel folder dogs/labradors/yellow inside a parent folder
2) Login as Admin, set folder permissions "View" for parent folder
3) Login as pas user
4) verify pas user can view all folders i.e. "folder1/dogs/labradors/yellow" & inherit permissions from parent.
:param core_session: Authenticated Centrify session
:param pas_general_secrets: Fixture to read secrets related data from yaml file
:param cleanup_secrets_and_folders: Fixture to cleanup the secrets & folders created
:param users_and_roles: Fixture to create random user with PAS User Rights
:param create_secret_folder: Fixture to create folder & yields folder details
"""
params = pas_general_secrets
folders_list = cleanup_secrets_and_folders[1]
folder_parameters = create_secret_folder
parent_folder_id = folder_parameters['ID']
# creating multilevel folder dogs/labradors/yellow
child_folder_success, child_folder_parameters, child_folder_id = create_folder(
core_session,
params['folder_multiple_level'],
params['description'],
parent=parent_folder_id)
assert child_folder_success, f'Failed to create multilevel folder, API response result: {child_folder_id}'
logger.info(f'Multilevel Folder created successfully, details are: {child_folder_parameters}')
# Getting details of Folder Labradors
labradors_folder = get_folder(core_session, child_folder_id)
logger.info(f'labradors folder details:{labradors_folder}')
labradors_folder_id = labradors_folder['Result']['Results'][0]['Row']['Parent']
# Getting id of Folder Dogs
dogs_folder = get_folder(core_session, labradors_folder_id)
logger.info(f'Dogs folder details:{dogs_folder}')
dogs_folder_id = dogs_folder['Result']['Results'][0]['Row']['Parent']
# API to get new session for User A
pas_power_user_session = users_and_roles.get_session_for_user('Privileged Access Service User')
assert pas_power_user_session.auth_details, 'Failed to Login with PAS User'
user_name = pas_power_user_session.auth_details['User']
user_id = pas_power_user_session.auth_details['UserId']
logger.info(f'User with PAS User Rights login successfully: user_Name:{user_name}')
# Api to give user permissions to parent folder
user_permissions_alpha = give_user_permissions_to_folder(core_session,
user_name,
user_id,
parent_folder_id,
'View')
assert user_permissions_alpha['success'], \
f'Not Able to set user permissions to folder, API response result:{user_permissions_alpha["Result"]}'
logger.info(f'User Permissions to folder: {user_permissions_alpha}')
    # Getting id of Folder Dogs
dog_folder = get_secrets_and_folders_in_folders(pas_power_user_session, parent_folder_id)
logger.info(f'Details of Dog Folder Retrieved with pas user:{dog_folder}')
dog_id = dog_folder["Result"]["Results"][0]["Entities"][0]["Key"]
assert dog_id == dogs_folder_id, \
f'Failed to view dog folder with pas user, API response result:{dog_folder["success"]} & {dog_folder["Result"]}'
    # Getting id of Folder Labradors
labradors_folder = get_secrets_and_folders_in_folders(pas_power_user_session, dog_id)
logger.info(f'Details of labradors Folder Retrieved with pas user:{labradors_folder}')
labradors_id = labradors_folder["Result"]["Results"][0]["Entities"][0]["Key"]
assert labradors_id == labradors_folder_id, \
f'Failed to view labradors folder with pas user, API response result:' \
f'{labradors_folder["success"]} & {labradors_folder["Result"]}'
    # Getting id of Folder Yellow
yellow_folder = get_secrets_and_folders_in_folders(pas_power_user_session, labradors_id)
logger.info(f'Details of yellow Folder Retrieved with pas user:{yellow_folder}')
yellow_id = yellow_folder["Result"]["Results"][0]["Entities"][0]["Key"]
assert \
yellow_id == child_folder_id, f' Failed to view yellow folder with pas user, API response result:' \
f'{yellow_folder["success"]} & {yellow_folder["Result"]}'
# Getting permissions of the folder yellow(should inherit from parent)
permissions_yellow = SetsManager.get_collection_rights(pas_power_user_session, child_folder_id)
verify_permissions_all = 'View'
assert verify_permissions_all == permissions_yellow["Result"], \
f'Failed to verify permissions for the folder, API response result:{permissions_yellow["Result"]}'
logger.info(f'Permissions of the folder created: {permissions_yellow}')
# cleanup of folders accordingly
folders_list.insert(0, child_folder_id)
folders_list.insert(1, labradors_folder_id)
folders_list.insert(2, dogs_folder_id)
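
# Hedged, self-contained sketch (not part of the test) of the inheritance rule
# asserted above: a folder without an explicit grant inherits the nearest
# ancestor's permission. The dict-based tree below is illustrative only.
def _effective_permission(folder, grants, parent_of):
    while folder is not None:
        if folder in grants:
            return grants[folder]
        folder = parent_of.get(folder)
    return None

if __name__ == '__main__':
    _parents = {'yellow': 'labradors', 'labradors': 'dogs', 'dogs': 'folder1'}
    _grants = {'folder1': 'View'}
    assert _effective_permission('yellow', _grants, _parents) == 'View'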
|
[
"singh.jaspal92@gmail.com"
] |
singh.jaspal92@gmail.com
|
bbbb3283bf2aa13ee7af8d20d65b760414fc42f7
|
08e2ed7fb3a3080c8cdc46cf7e4cbb2a6e60f90a
|
/src/game_object/components/image_component.py
|
20c402c520e23640a7a37d13e1d5c3a1f98ea467
|
[] |
no_license
|
thydungeonsean/_rainbowmancer
|
1630b60983719dde77cd1dea267dd15dde855c38
|
cebaf66f5c69f60f8b6c38492f19b8f1e32f73fe
|
refs/heads/master
| 2021-04-28T07:35:06.183408
| 2018-03-19T19:55:47
| 2018-03-19T19:55:47
| 122,226,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,271
|
py
|
from game_object_component import GameObjectComponent
from src.data_structures.vector import Vector
from src.image.tile_image import TileImage
class ImageComponent(GameObjectComponent):
A = 0
B = 1
def __init__(self, owner, image_id, animated=True):
GameObjectComponent.__init__(self, owner)
self.image_id = image_id
self.images = self.load_images(animated)
@property
def frame(self):
return self.game_state.frame
@property
def color_id(self):
return self.images[0].color_id
def load_images(self, animated):
images = {
ImageComponent.A: TileImage(self.image_id),
ImageComponent.B: TileImage(self.image_id, animated_frame=animated)
}
return images
    def position(self, pos):
        # pos is an (x, y) tuple; the Python 2-only tuple parameter was
        # removed so the module also runs on Python 3.
        for i in (0, 1):
            self.images[i].position(pos)
def draw(self, surface):
image = self.images[self.frame]
image.draw(surface)
def change_color(self, new_color):
if new_color != self.color_id:
for i in (0, 1):
self.images[i].change_color(new_color)
@property
def w(self):
return self.images[0].w
@property
def h(self):
return self.images[0].h
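
# Hedged, self-contained sketch of the two-frame animation idea used by
# ImageComponent: two images keyed by the A/B frame bit, flipped each tick.
# Plain-Python stand-in, so TileImage and the real game state are not needed.
class _FrameToggleDemo(object):
    def __init__(self):
        self.images = {ImageComponent.A: 'frame-A', ImageComponent.B: 'frame-B'}
        self.frame = ImageComponent.A

    def tick(self):
        self.frame ^= 1  # toggle between A (0) and B (1)

    def current(self):
        return self.images[self.frame]

if __name__ == '__main__':
    demo = _FrameToggleDemo()
    for _ in range(4):
        print(demo.current())  # frame-A, frame-B, frame-A, frame-B
        demo.tick()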
|
[
"marzecsean@gmail.com"
] |
marzecsean@gmail.com
|
6295e3e515ae5835cbede861390b080d25f8b017
|
a3a898a42049da56bbda00adf4cd781f4ffcce5f
|
/ut.py
|
661e7e7207fdf62cf6f932036333775b0dad9df4
|
[] |
no_license
|
zhanglintc/algorithm
|
682b282b952a4db393c5f2aecaf9d3e7c792d635
|
5bf955aa62ca7728a26fc0613940839121876f29
|
refs/heads/main
| 2023-03-21T20:00:03.989144
| 2021-03-24T10:03:35
| 2021-03-24T10:03:35
| 349,449,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,969
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# -*- mode: python -*-
# vi: set ft=python :
import unittest
class BaseTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('######################################################################')
print("{0}:".format(cls.__name__))
class Base64Test(BaseTestCase):
def test_base64(self):
import base64 as base64
import b64.b64 as b64
# encode
for c in 'AZaz09+-':
self.assertEqual(base64.b64encode(c.encode()), b64.base64_encode(c.encode()))
self.assertEqual(base64.b64encode(b'Man'), b64.base64_encode(b'Man'))
self.assertEqual(base64.b64encode(b'any carnal pleasure.'), b64.base64_encode(b'any carnal pleasure.'))
# decode
self.assertEqual(base64.b64decode('QQ=='), b64.base64_decode('QQ=='))
self.assertEqual(base64.b64decode('TWFu'), b64.base64_decode('TWFu'))
class AlgorithmSortTest(BaseTestCase):
def __new__(cls, *args, **kwargs):
import sort.sort as _sort
cls._sort = _sort
import random
cls._array = [random.randint(0, 100000) for _ in range(3000)]
        instance = super(AlgorithmSortTest, cls).__new__(cls)
return instance
def __init__(self, *args, **kwargs):
        super(AlgorithmSortTest, self).__init__(*args, **kwargs)
# @unittest.skip('pass')
def sort_test_helper(self, func):
print()
pairs = [
([], []),
([1], [1]),
([x for x in range(1000, -1, -1)], [x for x in range(1000, -1, -1)]),
(self._array[:], self._array[:]),
]
for pair in pairs:
by_testee, by_system = pair
            func(by_testee)
            by_system.sort()
self.assertEqual(by_testee, by_system)
def test_swap_sort(self):
self.sort_test_helper(self._sort.swap_sort)
def test_bubble_sort(self):
self.sort_test_helper(self._sort.bubble_sort)
def test_selection_sort(self):
self.sort_test_helper(self._sort.selection_sort)
def test_insertion_sort(self):
self.sort_test_helper(self._sort.insertion_sort)
def test_shell_sort(self):
self.sort_test_helper(self._sort.shell_sort)
def test_heap_sort(self):
self.sort_test_helper(self._sort.heap_sort)
def test_merge_sort(self):
self.sort_test_helper(self._sort.merge_sort)
def test_quick_sort(self):
self.sort_test_helper(self._sort.quick_sort)
def test_counting_sort(self):
self.sort_test_helper(self._sort.counting_sort)
self.sort_test_helper(self._sort.counting_sort_stable)
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
load_class = unittest.TestLoader().loadTestsFromTestCase
suites = [
load_class(Base64Test),
load_class(AlgorithmSortTest),
]
result = [runner.run(suite) for suite in suites]
list(map(print, result))
|
[
"zhanglintc623@gmail.com"
] |
zhanglintc623@gmail.com
|
1d4be0626cf2e87afbb3890b9c2b4fdd4d4312e2
|
a2b6bc9bdd2bdbe5871edb613065dd2397175cb3
|
/简单/二叉搜索树中的众数.py
|
e059cf51f0fc83293f2a71e5defab5ed13b51d62
|
[] |
no_license
|
Asunqingwen/LeetCode
|
ed8d2043a31f86e9e256123439388d7d223269be
|
b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee
|
refs/heads/master
| 2022-09-26T01:46:59.790316
| 2022-09-01T08:20:37
| 2022-09-01T08:20:37
| 95,668,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
'''
Given a binary search tree (BST) that may contain duplicate values, find all
modes in it (the most frequently occurring values).
Assume the BST is defined as follows:
- the left subtree of a node contains only values less than or equal to the node's value
- the right subtree of a node contains only values greater than or equal to the node's value
- both the left and right subtrees are binary search trees
For example:
given the BST [1,null,2,2],
   1
    \
     2
    /
   2
return [2].
Note: if there is more than one mode, the output order does not matter.
Follow-up: can you do it without using extra space? (Assume the implicit stack
created by recursion does not count.)
'''
from typing import List
from Tree import TreeNode, stringToTreeNode
from collections import defaultdict
class Solution:
def findMode(self, root: TreeNode) -> List[int]:
res = []
if not root:
return res
hashMap = defaultdict(int)
def dfs(node):
if not node:
return
if node.left:
dfs(node.left)
hashMap[node.val] += 1
if node.right:
dfs(node.right)
dfs(root)
max_count = max(hashMap.values())
for k, v in hashMap.items():
if v == max_count:
res.append(k)
return res
if __name__ == '__main__':
nums = "2147483647"
root = stringToTreeNode(nums)
sol = Solution()
print(sol.findMode(root))
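
# Hedged sketch of the follow-up (no hash map): an in-order walk of a BST
# visits equal values consecutively, so the modes can be collected with O(1)
# extra state (ignoring the recursion stack). Self-contained stand-in below,
# with a tiny node class instead of the project's TreeNode.
class _Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def find_mode_inorder(root):
    res = []
    cur_val, cur_cnt, max_cnt = None, 0, 0

    def visit(v):
        nonlocal cur_val, cur_cnt, max_cnt
        cur_cnt = cur_cnt + 1 if v == cur_val else 1
        cur_val = v
        if cur_cnt > max_cnt:
            max_cnt = cur_cnt
            res.clear()
        if cur_cnt == max_cnt:
            res.append(v)

    def inorder(node):
        if node:
            inorder(node.left)
            visit(node.val)
            inorder(node.right)

    inorder(root)
    return res

if __name__ == '__main__':
    print(find_mode_inorder(_Node(1, right=_Node(2, left=_Node(2)))))  # [2]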
|
[
"sqw123az@sina.com"
] |
sqw123az@sina.com
|
d703574693e9918ade0ab71c8afb819c584424ab
|
cb8431a306af2fabf37b74f68b5bd3fdc4cae134
|
/etlt/dimension/RegularDimension.py
|
9eca77137611940200b8f332dbd90e76a4cd545a
|
[
"MIT"
] |
permissive
|
e7dal/py-etlt
|
fb5fcc25cd5ab33c6d02f37ab8421aefe877753c
|
1c5b8ea60293c14f54d7845a9fe5c595021f66f2
|
refs/heads/master
| 2020-05-16T19:12:27.838844
| 2019-04-10T10:02:22
| 2019-04-10T10:02:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,834
|
py
|
"""
ETLT
Copyright 2016 Set Based IT Consultancy
Licence MIT
"""
import abc
class RegularDimension(metaclass=abc.ABCMeta):
"""
Abstract parent class for translating natural key to a technical key of a regular dimension.
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self):
"""
Object constructor.
"""
self._map = {}
"""
        The map from natural keys to technical keys.
:type: dict[T, int|None]
"""
# Pre-load look up data in to the map.
self.pre_load_data()
# ------------------------------------------------------------------------------------------------------------------
def get_id(self, natural_key, enhancement=None):
"""
Returns the technical ID for a natural key or None if the given natural key is not valid.
:param T natural_key: The natural key.
:param T enhancement: Enhancement data of the dimension row.
:rtype: int|None
"""
# If the natural key is known return the technical ID immediately.
if natural_key in self._map:
return self._map[natural_key]
# The natural key is not in the map of this dimension. Call a stored procedure for translating the natural key
# to a technical key.
self.pre_call_stored_procedure()
success = False
try:
key = self.call_stored_procedure(natural_key, enhancement)
success = True
finally:
self.post_call_stored_procedure(success)
# Add the translation for natural key to technical ID to the map.
self._map[natural_key] = key
return key
# ------------------------------------------------------------------------------------------------------------------
@abc.abstractmethod
def call_stored_procedure(self, natural_key, enhancement):
"""
Calls a stored procedure for getting the technical key of a natural key. Returns the technical ID or None if
the given natural key is not valid.
:param T natural_key: The natural key.
:param T enhancement: Enhancement data of the dimension row.
:rtype: int|None
"""
raise NotImplementedError()
# ------------------------------------------------------------------------------------------------------------------
def pre_load_data(self):
"""
Can be overridden to pre-load lookup data from a dimension table.
:rtype: None
"""
pass
# ------------------------------------------------------------------------------------------------------------------
def pre_call_stored_procedure(self):
"""
        This method is invoked before calling the stored procedure for getting the technical key of a natural key.
In a concurrent environment override this method to acquire a lock on the dimension or dimension hierarchy.
:rtype: None
"""
pass
# ------------------------------------------------------------------------------------------------------------------
def post_call_stored_procedure(self, success):
"""
This method is invoked after calling the stored procedure for getting the technical key of a natural key.
In a concurrent environment override this method to release a lock on the dimension or dimension hierarchy and
to commit or rollback the transaction.
:param bool success: True: the stored procedure is executed successfully. False: an exception has occurred.
:rtype: None
"""
pass
# ----------------------------------------------------------------------------------------------------------------------
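
# Hedged sketch (not part of the library): a minimal concrete subclass showing
# the lookup-then-fallback flow of get_id(). A plain dict stands in for the
# stored-procedure call.
class _DictDimension(RegularDimension):
    _DB = {'NL': 1, 'US': 2}

    def call_stored_procedure(self, natural_key, enhancement):
        return self._DB.get(natural_key)  # None for unknown keys

if __name__ == '__main__':
    dim = _DictDimension()
    print(dim.get_id('NL'))  # 1, resolved via the "stored procedure"
    print(dim.get_id('NL'))  # 1 again, now served from the in-memory map
    print(dim.get_id('??'))  # None, and the miss is cached too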
|
[
"p.r.water@setbased.nl"
] |
p.r.water@setbased.nl
|
d8f484ff8803152cae0d30c0990aa7841c72c689
|
cad2908abb7b2a649ec2382309f56e6c95ee834a
|
/course3/principles/inheritance.py
|
dcfa43ec8c6726bfa54b00ffde8e352d592ec16b
|
[] |
no_license
|
andyzt/tceh-python
|
619b6eec0897e3b3671d416d6eb7346f69730747
|
de74cb7fffea3528cd2a3035b0a9d53c9dca0c6b
|
refs/heads/master
| 2021-01-16T00:17:02.198730
| 2016-02-26T16:00:16
| 2016-02-26T16:00:16
| 52,616,472
| 4
| 3
| null | 2016-02-26T16:15:06
| 2016-02-26T16:15:06
| null |
UTF-8
|
Python
| false
| false
| 1,578
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
__author__ = 'sobolevn'
class Parent(object):
def __init__(self):
print('Parent inited')
self.value = 'Parent'
def do(self):
print('Parent do(): %s' % self.value)
@staticmethod
def static_do():
print('Parent static_do()')
@classmethod
def class_do(cls):
print('Parent class_do(): %s' % cls)
class Child(Parent):
def __init__(self):
super(Child, self).__init__()
print('Child inited')
self.value = 'Child'
@staticmethod
def static_do():
print('Child static_do()')
class Mixin(object):
@classmethod
def class_do(cls):
print('Mixed: %s' % cls)
class MixedChildOne(Parent, Mixin):
pass
class MixedChildTwo(Mixin, Parent):
pass
class MixedChildThree(Parent, Mixin):
@classmethod
def class_do(cls):
Mixin.class_do()
if __name__ == '__main__':
Parent.static_do()
Parent.class_do()
parent = Parent()
parent.do()
Parent.do(parent) # do not use this!
parent.class_do()
parent.static_do()
parent.__class__.class_do()
parent.__class__.static_do()
# Child:
Child.static_do()
Child.class_do()
child = Child()
child.do()
# Mixins:
mixin1 = MixedChildOne()
mixin1.class_do()
print(mixin1.__class__.__mro__)
mixin2 = MixedChildTwo()
mixin2.class_do()
print(mixin2.__class__.__mro__)
mixin3 = MixedChildThree()
mixin3.class_do()
print(mixin3.__class__.__mro__)
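    # Hedged addendum (not in the original): the mixin order decides which
    # class_do wins, because C3 linearization searches the bases left to right.
    assert MixedChildOne.class_do.__func__ is Parent.class_do.__func__
    assert MixedChildTwo.class_do.__func__ is Mixin.class_do.__func__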
|
[
"mail@sobolevn.me"
] |
mail@sobolevn.me
|
83cafd706a1d76e745e3773b8311bb5fe4844a10
|
594bd1d3afa4c74c577e6c5d7f8e71d4835c7734
|
/MainApp/migrations/0093_auto_20161118_1232.py
|
a9c950e213543791b6e39e28c16dcc7152a95d9b
|
[] |
no_license
|
CoriAle/app
|
5a930b4460a5a79e4d2d97b0de205c050c196a53
|
280313f86db0ba9a754ff52dc8a37bf6420554d1
|
refs/heads/master
| 2023-01-23T22:45:15.127029
| 2018-07-03T01:59:19
| 2018-07-03T01:59:19
| 136,048,276
| 0
| 0
| null | 2023-01-12T05:12:39
| 2018-06-04T15:49:01
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 481
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-11-18 18:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('MainApp', '0092_remove_pagopersonal_vales'),
]
operations = [
migrations.AlterField(
model_name='persona',
name='fecha_pago',
field=models.DateTimeField(auto_now_add=True, null=True),
),
]
|
[
"1995coral@hotmail.es"
] |
1995coral@hotmail.es
|
444343a37dcff36a26a64f7da7ac99a29f078c08
|
0b4d1fb57546adbc85659a144742c4ecd9dfe219
|
/src/genie/libs/parser/ios/tests/ShowInterfacesSwitchport/cli/equal/golden_output_2_expected.py
|
5515be0b5806e40b6a2d632d373a55a7ec3474a5
|
[
"Apache-2.0"
] |
permissive
|
oboehmer/genieparser
|
dcc4fd0c6611ab4d799928ce6d2b55a2ad7a64d2
|
e88d02c08a3968d38ba90121b46af614715c5ecc
|
refs/heads/master
| 2021-07-11T17:04:11.198119
| 2020-12-02T20:34:24
| 2020-12-02T20:34:24
| 222,627,198
| 1
| 0
|
Apache-2.0
| 2019-11-19T06:43:15
| 2019-11-19T06:43:14
| null |
UTF-8
|
Python
| false
| false
| 1,826
|
py
|
expected_output = {
"Port-channel12": {
"operational_mode": "trunk",
"switchport_mode": "trunk",
"access_vlan_name": "default",
"private_vlan": {},
"switchport_enable": True,
"native_vlan_tagging": True,
"negotiation_of_trunk": False,
"encapsulation": {
"native_vlan": "1",
"native_vlan_name": "default",
"operational_encapsulation": "dot1q",
"administrative_encapsulation": "dot1q",
},
"port_channel": {
"port_channel_member_intfs": ["TenGigabitEthernet1/1/2"],
"port_channel_member": True,
},
"pruning_vlans": "2-1001",
"access_vlan": "1",
"unknown_multicast_blocked": False,
"trunk_vlans": "1,111,130,131,400,405,410,420,430,439-442,450,451,460,",
"unknown_unicast_blocked": False,
},
"TenGigabitEthernet1/1/2": {
"access_vlan": "1",
"operational_mode": "trunk",
"switchport_mode": "trunk",
"access_vlan_name": "default",
"switchport_enable": True,
"private_vlan": {},
"capture_mode": False,
"trunk_vlans": "1,111,130,131,400,405,410,420,430,439-442,450,451,460,",
"capture_vlans": "all",
"negotiation_of_trunk": False,
"unknown_multicast_blocked": False,
"port_channel": {
"port_channel_int": "Port-channel12",
"port_channel_member": True,
},
"native_vlan_tagging": True,
"encapsulation": {
"native_vlan": "1",
"native_vlan_name": "default",
"operational_encapsulation": "dot1q",
"administrative_encapsulation": "dot1q",
},
"unknown_unicast_blocked": False,
"pruning_vlans": "2-1001",
},
}
|
[
"ken@celenza.org"
] |
ken@celenza.org
|
04eed828b4817621a1725d5e816cab6a74e057de
|
bdb2506fb9562005c2f1b4c88330fa108f6219db
|
/appliedunis/urls.py
|
c7c7f205168a25ffeb28c2ff5a8af1b3568084a0
|
[] |
no_license
|
naeem23/University-Admission-Assistant---UAA
|
abc68766585d8a4e69de142cd077ad3a1c372162
|
744f14b5bbdd1ff96c6a01967946278813a5b6db
|
refs/heads/master
| 2023-05-13T00:15:11.138977
| 2019-08-04T13:24:58
| 2019-08-04T13:24:58
| 200,491,140
| 1
| 0
| null | 2023-04-21T20:35:21
| 2019-08-04T12:39:09
|
Python
|
UTF-8
|
Python
| false
| false
| 372
|
py
|
from . import views
from django.contrib.auth.decorators import login_required
from django.urls import path, re_path
app_name = 'appliedunis'
urlpatterns = [
path('api/cancel', login_required(views.cancelApi), name='delete_api'),
path('api/apply/', login_required(views.applyApi), name='api_uni'),
path('api/read/', login_required(views.markAsRead), name='read'),
]
|
[
"sddqnaeem@gmail.com"
] |
sddqnaeem@gmail.com
|
99178d3942f39b0b5f1f5d1954c4b20943bef419
|
ff6f60d02ed8d024f7b2db5c9eb4b1196ebf166b
|
/my_flask/app/models/book.py
|
1d81876b0b33e29edad6d19583515c95e0fcb3ff
|
[] |
no_license
|
cekong/learnit
|
43b707e347ff552754b6592e01dd106c98cd0cc5
|
b4111d6fee95960f7b7ca5421b7159cb6122ad2a
|
refs/heads/master
| 2020-03-25T13:53:37.848843
| 2019-08-29T06:46:48
| 2019-08-29T06:46:48
| 143,848,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
''''''
'''
Model layer
https://coding.imooc.com/lesson/194.html#mid=12779 4-8 Defining the first model class
sqlalchemy
Flask_SQLAlchemy
SQLAlchemy is one of the best-known ORM tools in the Python community. It is
designed for efficient, high-performance database access and implements a
complete enterprise-grade persistence model.
ORM (object-relational mapping) establishes a correspondence between tables in
a database and classes in an object-oriented language.
'''
from sqlalchemy import Column,Integer,String,Time
from app.models.base import db,Base
#Create the database table from code
class Book(Base):
    id=Column(Integer,primary_key=True,autoincrement=True)
    #autoincrement: the id increases automatically
    title=Column(String(50),nullable=False)#nullable=False: this column must not be empty
    author=Column(String(30),default='佚名')#defaults to '佚名' (anonymous) when left empty
    binding = Column(String(20))
    publisher=Column(String(50))
    price=Column(String(30))
    pages=Column(Integer)
    pubdate=Column(String(20))
    isbn = Column(String(15),nullable=False,unique=True)#unique: this value must be unique, no duplicates
    summary=Column(String(1000))
    image=Column(String(50))
#MVC: the M (Model) layer holds only data
#ORM: object-relational mapping
def sample(self):
pass
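
# Hedged, self-contained illustration of the same ORM idea with plain
# SQLAlchemy (1.4+ assumed) and an in-memory SQLite database, independent of
# the Flask-bound `db`/`Base` used above:
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import declarative_base, Session
    _Base = declarative_base()

    class _Book(_Base):
        __tablename__ = 'book_demo'
        id = Column(Integer, primary_key=True, autoincrement=True)
        title = Column(String(50), nullable=False)
        isbn = Column(String(15), nullable=False, unique=True)

    engine = create_engine('sqlite://')
    _Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(_Book(title='Flask Web Development', isbn='9781449372620'))
        session.commit()
        print(session.query(_Book).first().title)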
|
[
"noreply@github.com"
] |
cekong.noreply@github.com
|
90a34921333a0a6e00b6e543ba5c3a07f2e7af0c
|
2f62291080c180e1f65c15ca300c66e7b75605d3
|
/comment/admin.py
|
56494bcff01c33399c28e260394c102aee73a006
|
[] |
no_license
|
XiaoFei-97/NewBLogSite
|
9c3d2a4121a2fd8bc3fe8f2ad42ae1caf297109e
|
8f878173eaba82073932811357724536a4c6949f
|
refs/heads/master
| 2020-04-13T02:11:40.620117
| 2019-03-11T07:41:26
| 2019-03-11T07:41:26
| 162,896,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
from django.contrib import admin  # Django admin site
from .models import Comment  # import the Comment model from this app's models
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    # Show the related article object, comment text, comment time, and commenter in the admin list
list_display = ('id', 'content_object', 'text', 'comment_time', 'user')
|
[
"jack_970124@163.com"
] |
jack_970124@163.com
|
3947ab0da8b6fc23714ddc19616210640432a080
|
2693c54a5243bb991f5e9ac6aa75b4ce43e3bb22
|
/forkan/rl/envs/vae_stack.py
|
2fd62da241c285a19d1d41fb8779850af24e72d8
|
[
"Unlicense"
] |
permissive
|
llach/forkan
|
36f50eda62153b043ec5a6e10513347117635ad9
|
33ae3d48ce6f24fc0c254b93ed3f4b8a767ffea5
|
refs/heads/master
| 2020-04-01T06:47:26.034989
| 2019-07-21T13:43:03
| 2019-07-21T13:43:03
| 152,964,129
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,506
|
py
|
import logging
import numpy as np
from collections import deque
from forkan.models import VAE
from gym import spaces
from forkan.rl import EnvWrapper
class VAEStack(EnvWrapper):
def __init__(self,
env,
load_from,
k=3,
vae_network='pendulum',
**kwargs,
):
self.logger = logging.getLogger(__name__)
# inheriting from EnvWrapper and passing it an env makes spaces available.
super().__init__(env)
self.k = k
self.v = VAE(load_from=load_from, network=vae_network)
        # np.float was removed in NumPy 1.24; np.inf/np.float64 are the
        # canonical spellings.
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(self.v.latent_dim*self.k,),
                                            dtype=np.float64)
self.vae_name = self.v.savename
self.q = deque(maxlen=self.k)
self._reset_queue()
def _reset_queue(self):
for _ in range(self.k):
self.q.appendleft([0]*self.v.latent_dim)
def _process(self, obs):
mus, _, _ = self.v.encode(np.expand_dims(obs, 0))
self.q.appendleft(np.squeeze(mus))
def _get_obs(self):
return np.asarray(self.q).flatten()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self._process(obs)
return self._get_obs(), reward, done, info
def reset(self):
self._reset_queue()
obs = self.env.reset()
self._process(obs)
return self._get_obs()
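
# Hedged, self-contained illustration of the k-frame latent stacking above,
# with a stubbed encoder standing in for the real VAE:
if __name__ == '__main__':
    latent_dim, k = 2, 3
    q = deque(maxlen=k)
    for _ in range(k):
        q.appendleft([0] * latent_dim)        # zero-filled history after reset
    for step in range(2):
        mu = np.full(latent_dim, step + 1.0)  # stand-in for self.v.encode(obs)
        q.appendleft(np.squeeze(mu))
    print(np.asarray(q).flatten())            # newest latents come first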
|
[
"llach@techfak.uni-bielefeld.de"
] |
llach@techfak.uni-bielefeld.de
|
4ea1c3b4e147e92d48a3e0a9fe66894514555851
|
3649dce8b44c72bbfee56adf4e29ca6c5ba2703a
|
/code_up1440.py
|
17945c10ff51ccfa95b13dbf7aa8299ec216180e
|
[] |
no_license
|
beOk91/code_up
|
03c7aca76e955e3a59d797299749e7fc2457f24a
|
ca1042ce216cc0a80e9b3d3ad363bc29c4ed7690
|
refs/heads/master
| 2022-12-06T08:23:00.788315
| 2020-08-20T11:21:59
| 2020-08-20T11:21:59
| 284,844,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
num=int(input())
num_list=list(map(int,input().strip().split()))
for i in range(num):
print("{}:".format(i+1),end=" ")
for j in range(num):
if i!=j:
if num_list[i]<num_list[j]:
print("<",end=" ")
elif num_list[i]==num_list[j]:
print("=",end=" ")
else:
print(">",end=" ")
print()
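
# Hedged worked example (not in the original): for input "3" and "1 2 2" the
# loops above print
#   1: < <
#   2: > =
#   3: > =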
|
[
"be_ok91@naver.com"
] |
be_ok91@naver.com
|
a55baf3e9516d59c3250ca8f0d14b799a6376e0d
|
9f2445e9a00cc34eebcf3d3f60124d0388dcb613
|
/2019-12-10-Na_Chan_del_segfault/seg_fault.py
|
78fe3fba59fe42bf74641ff0185ff0171b865e62
|
[] |
no_license
|
analkumar2/Thesis-work
|
7ee916d71f04a60afbd117325df588908518b7d2
|
75905427c2a78a101b4eed2c27a955867c04465c
|
refs/heads/master
| 2022-01-02T02:33:35.864896
| 2021-12-18T03:34:04
| 2021-12-18T03:34:04
| 201,130,673
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
# exec(open('seg_fault.py').read())
import moose
import pylab
import rdesigneur as rd
# Wrapper function so that the model can be built and run again and again
def rdeswrapper():
# Deleting any previous run of the model
try:
# [moose.delete(x) for x in ['/model', '/library']]
moose.delete('/model')
except:
pass
######################################
rdes = rd.rdesigneur(
chanProto = [['make_HH_Na()', 'Na'], ['K_A_Chan_(Migliore2018)_ghk.K_A_Chan()', 'K']],
chanDistrib = [
['K', 'soma', 'Gbar', '2000' ],
['Na', 'soma', 'Gbar', '100' ],],
stimList = [['soma', '1', '.', 'inject', '(t>0.1 && t<0.2) * 1e-8' ]],
plotList = [['soma', '1', '.', 'Vm', 'Membrane potential']]
)
rdes.buildModel()
moose.reinit()
moose.start( 0.3 )
rdes.display()
return rdes
# # Initial run
# print('Initial run')
# rdeswrapper()
# Delete library and run
moose.delete('/library')
print('After library deletion and re-build and re-run')
rdeswrapper()
# Delete Na and run
moose.delete('/library/Na')
print('After library/Na deletion and re-build and re-run')
rdeswrapper()
|
[
"analkumar2@gmail.com"
] |
analkumar2@gmail.com
|
70a98c32d3373e086b562b057378936237a6b801
|
674f5dde693f1a60e4480e5b66fba8f24a9cb95d
|
/armulator/armv6/opcodes/concrete/ldrd_literal_t1.py
|
a9c906d27b400388948fff874fff702b14d09748
|
[
"MIT"
] |
permissive
|
matan1008/armulator
|
75211c18ebc9cd9d33a02890e76fc649483c3aad
|
44f4275ab1cafff3cf7a1b760bff7f139dfffb07
|
refs/heads/master
| 2023-08-17T14:40:52.793120
| 2023-08-08T04:57:02
| 2023-08-08T04:57:02
| 91,716,042
| 29
| 7
|
MIT
| 2023-08-08T04:55:59
| 2017-05-18T16:37:55
|
Python
|
UTF-8
|
Python
| false
| false
| 605
|
py
|
from armulator.armv6.bits_ops import substring, bit_at
from armulator.armv6.opcodes.abstract_opcodes.ldrd_literal import LdrdLiteral
class LdrdLiteralT1(LdrdLiteral):
@staticmethod
def from_bitarray(instr, processor):
imm8 = substring(instr, 7, 0)
rt2 = substring(instr, 11, 8)
rt = substring(instr, 15, 12)
add = bit_at(instr, 23)
imm32 = imm8 << 2
if rt == rt2 or rt in (13, 15) or rt2 in (13, 15) or bit_at(instr, 21):
print('unpredictable')
else:
return LdrdLiteralT1(instr, add=add, imm32=imm32, t=rt, t2=rt2)
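
# Hedged worked example of the field extraction above (the instruction word is
# chosen purely for illustration): for instr = 0xE9D51234, imm8 = 0x34,
# rt2 = 2, rt = 1, and imm32 = imm8 << 2 = 0xD0.
if __name__ == '__main__':
    print(hex(substring(0xE9D51234, 7, 0) << 2))  # 0xd0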
|
[
"matan1008@gmail.com"
] |
matan1008@gmail.com
|
a0b81a218a73b11bc6ba7b85118f466015bc7b86
|
5db0fab37c2b8a618d85d3b60fab9f806c416474
|
/src/python/pants/backend/python/util_rules/ancestor_files_test.py
|
fad18565bb0bc12f8bb70ddb75d2d389ba6ec0f4
|
[
"Apache-2.0"
] |
permissive
|
pantsbuild/pants
|
4988d1ac5474ec95f94ce2218aeb759401e4b011
|
98cbda8545f0d58c586ed2daa76fefd729d5e0d5
|
refs/heads/main
| 2023-09-05T03:44:17.646899
| 2023-09-01T19:52:09
| 2023-09-01T19:52:09
| 7,209,075
| 2,708
| 593
|
Apache-2.0
| 2023-09-14T19:33:33
| 2012-12-17T17:39:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,232
|
py
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import pytest
from pants.backend.python.util_rules import ancestor_files
from pants.backend.python.util_rules.ancestor_files import (
AncestorFiles,
AncestorFilesRequest,
putative_ancestor_files,
)
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*ancestor_files.rules(),
QueryRule(AncestorFiles, (AncestorFilesRequest,)),
]
)
def assert_injected(
rule_runner: RuleRunner,
*,
input_files: list[str],
empty_files: list[str],
nonempty_files: list[str],
expected_discovered: list[str],
ignore_empty_files: bool,
) -> None:
rule_runner.write_files({**{f: "" for f in empty_files}, **{f: "foo" for f in nonempty_files}})
request = AncestorFilesRequest(
requested=("__init__.py",),
input_files=tuple(input_files),
ignore_empty_files=ignore_empty_files,
)
result = rule_runner.request(AncestorFiles, [request]).snapshot
assert list(result.files) == sorted(expected_discovered)
@pytest.mark.parametrize("ignore_empty_files", [False, True])
def test_rule(rule_runner: RuleRunner, ignore_empty_files: bool) -> None:
assert_injected(
rule_runner,
input_files=[
"src/python/project/lib.py",
"src/python/project/subdir/__init__.py",
"src/python/project/subdir/lib.py",
"src/python/no_init/lib.py",
],
nonempty_files=[
"src/python/__init__.py",
"tests/python/project/__init__.py",
],
empty_files=["src/python/project/__init__.py"],
ignore_empty_files=ignore_empty_files,
expected_discovered=(
["src/python/__init__.py"]
+ ([] if ignore_empty_files else ["src/python/project/__init__.py"])
),
)
def test_identify_missing_ancestor_files() -> None:
assert {
"__init__.py",
"a/__init__.py",
"a/b/__init__.py",
"a/b/c/d/__init__.py",
} == putative_ancestor_files(
requested=("__init__.py",),
input_files=("a/b/foo.py", "a/b/c/__init__.py", "a/b/c/d/bar.py", "a/e/__init__.py"),
)
assert {
"__init__.py",
"src/__init__.py",
"src/python/__init__.py",
"src/python/a/__init__.py",
"src/python/a/b/__init__.py",
"src/python/a/b/c/d/__init__.py",
} == putative_ancestor_files(
requested=("__init__.py",),
input_files=(
"src/python/a/b/foo.py",
"src/python/a/b/c/__init__.py",
"src/python/a/b/c/d/bar.py",
"src/python/a/e/__init__.py",
),
)
assert putative_ancestor_files(requested=("f.py", "f.pyi"), input_files=("subdir/foo.py",)) == {
"f.py",
"f.pyi",
"subdir/f.py",
"subdir/f.pyi",
}
assert putative_ancestor_files(
requested=("f.py", "f.pyi"), input_files=("subdir/foo.pyi",)
) == {"f.py", "f.pyi", "subdir/f.py", "subdir/f.pyi"}
|
[
"noreply@github.com"
] |
pantsbuild.noreply@github.com
|
53bfabebe006a235d28336b4fc86a262baa2081b
|
4e04db11d891f869a51adf0e0895999d425f29f6
|
/portalbackend/lendapi/reporting/migrations/0002_auto_20170824_1910.py
|
0de31b6204ded3007b1ffb8f42d330f6c538b71c
|
[] |
no_license
|
mthangaraj/ix-ec-backend
|
21e2d4b642c1174b53a86cd1a15564f99985d23f
|
11b80dbd665e3592ed862403dd8c8d65b6791b30
|
refs/heads/master
| 2022-12-12T12:21:29.237675
| 2018-06-20T13:10:21
| 2018-06-20T13:10:21
| 138,033,811
| 0
| 0
| null | 2022-06-27T16:54:14
| 2018-06-20T13:04:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 859
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-24 19:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reporting', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='financialstatemententry',
name='item_category',
),
migrations.RemoveField(
model_name='financialstatemententry',
name='item_name',
),
migrations.AddField(
model_name='financialstatemententry',
name='statement_type',
field=models.CharField(choices=[('Income Statement', 'Income Statement'), ('Balance Sheet', 'Balance Sheet'), ('Cash Flow', 'Cash Flow')], default='Income Statement', max_length=60),
),
]
|
[
"thangaraj.matheson@ionixxtech.com"
] |
thangaraj.matheson@ionixxtech.com
|
a59222150d7b44cad7f9073542b3a3d9527d9baa
|
e6ec89f4e40b6ef7183ef76bf542f683154dea03
|
/django_tutorial/settings.py
|
5970db695f93dc3410ab4bc3fb20d3ca7ab6c0e1
|
[] |
no_license
|
shoark7/django-2.0-tutorial
|
a07919048dd22f5fe295713f6e46003ab8b4a57a
|
ccdae9ebc3d383145b0aa19227ff986b8d06cf93
|
refs/heads/master
| 2021-04-26T23:57:53.816519
| 2018-03-06T07:21:36
| 2018-03-06T07:21:36
| 123,886,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,153
|
py
|
"""
Django settings for django_tutorial project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v&)bevqcv7(8xre6%qy*%a4imbut_5@ndwfeegkqhr3gu)a4$f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls.apps.PollsConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"shoark7@gmail.com"
] |
shoark7@gmail.com
|
a24a8cb7958e6472091d3698f75ebcb279a536e7
|
f115984d89ee91e1fefa7bd0546f60db251dfee6
|
/model-cell-experiments/predict-mc.py
|
6504f4c534af5e600136a6c43be59dcfafaca1e8
|
[
"BSD-3-Clause"
] |
permissive
|
CardiacModelling/VoltageClampModel
|
f483fc3ad2129f75e377df210b9b91b1cdcb7565
|
f30271da75e3c70526e53fb51dc12b317ab3b714
|
refs/heads/master
| 2023-07-05T10:07:59.771334
| 2021-03-03T11:05:35
| 2021-03-03T11:05:35
| 227,666,074
| 3
| 0
|
BSD-3-Clause
| 2021-03-03T11:04:46
| 2019-12-12T17:58:13
|
Python
|
UTF-8
|
Python
| false
| false
| 4,079
|
py
|
#!/usr/bin/env python3
from __future__ import print_function
import sys
sys.path.append('../lib/')
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pints
import model as m; m.vhold = 0
"""
Prediction for single model cell experiment data
"""
predict_list = ['staircase', 'sinewave', 'ap-beattie', 'ap-lei']
data_idx = {'staircase': 1, 'sinewave': 0, 'ap-beattie': 2, 'ap-lei': 3}
protocol_list = {
'staircase': 'staircase-ramp.csv',
'sinewave': 'sinewave-ramp.csv',
'ap-beattie': 'ap-beattie.csv',
'ap-lei': 'ap-lei.csv'}
legend_ncol = {
'staircase': (2, 1),
'sinewave': (1, 1),
'ap-beattie': (4, 2),
'ap-lei': (4, 2)}
try:
which_predict = sys.argv[1]
except:
print('Usage: python %s [str:which_predict]' % os.path.basename(__file__))
sys.exit()
if which_predict not in predict_list:
raise ValueError('Input data %s is not available in the predict list' \
% which_predict)
savedir = './figs'
if not os.path.isdir(savedir):
os.makedirs(savedir)
# Load data
path2data = '../../model-cell-dataset/'
sys.path.append(path2data)
import util
idx = [0, data_idx[which_predict], 0]
f = 'data/20191002_mc_nocomp.dat'
whole_data, times = util.load(f, idx, vccc=True)
if which_predict == 'staircase':
to_plot = np.where(times < 15.2)[0]
for i in range(len(whole_data)):
whole_data[i] = whole_data[i][to_plot]
times = times[to_plot]
times = times * 1e3 # s -> ms
data_cc = whole_data[2] * 1e3 # V -> mV
data_vc = whole_data[1] * 1e3 # V -> mV
data = (whole_data[0] + whole_data[3]) * 1e12 # A -> pA
#out = np.array([times * 1e-3, data_vc]).T
#np.savetxt('recorded-voltage.csv', out, delimiter=',', comments='',
# header='\"time\",\"voltage\"')
saveas = 'mcnocomp'
# Model
model = m.Model('../mmt-model-files/full2-voltage-clamp-mc.mmt',
protocol_def=protocol_list[which_predict],
temperature=273.15 + 23.0, # K
transform=None,
readout='voltageclamp.Iout',
useFilterCap=False)
parameters = [
'mc.g',
'voltageclamp.cprs',
'membrane.cm',
'voltageclamp.rseries',
'voltageclamp.voffset_eff',
]
model.set_parameters(parameters)
parameter_to_fix = [
'voltageclamp.cprs_est',
'voltageclamp.cm_est',
'voltageclamp.rseries_est',
]
parameter_to_fix_values = [
0., # pF; Cprs*
0.0, # pF; Cm*
0, # GOhm; Rs*
]
fix_p = {}
for i, j in zip(parameter_to_fix, parameter_to_fix_values):
fix_p[i] = j
model.set_fix_parameters(fix_p)
# Load parameters
loaddir = './out'
loadas = 'mcnocomp'
fit_seed = 542811797
p = np.loadtxt('%s/%s-solution-%s-1.txt' % (loaddir, loadas, fit_seed))
current_label = 'Fit' if which_predict == 'staircase' else 'Prediction'
# Simulate
extra_log = ['voltageclamp.Vc', 'membrane.V']
simulation = model.simulate(p, times, extra_log=extra_log)
Iout = simulation['voltageclamp.Iout']
Vc = simulation['voltageclamp.Vc']
Vm = simulation['membrane.V']
# Plot
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(14, 4))
axes[0].plot(times, data_vc, c='#a6bddb', label=r'Measured $V_{cmd}$')
axes[0].plot(times, data_cc, c='#feb24c', label=r'Measured $V_{m}$')
axes[0].plot(times, Vc, ls='--', c='#045a8d', label=r'Input $V_{cmd}$')
axes[0].plot(times, Vm, ls='--', c='#bd0026', label=r'Predicted $V_{m}$')
axes[0].set_ylabel('Voltage (mV)', fontsize=14)
#axes[0].set_xticks([])
axes[0].legend(ncol=legend_ncol[which_predict][0])
axes[1].plot(times, data, alpha=0.5, label='Measurement')
axes[1].plot(times, Iout, ls='--', label=current_label)
axes[1].set_ylim([-800, 1200]) # TODO?
axes[1].legend(ncol=legend_ncol[which_predict][1])
axes[1].set_ylabel('Current (pA)', fontsize=14)
axes[1].set_xlabel('Time (ms)', fontsize=14)
plt.subplots_adjust(hspace=0)
plt.savefig('%s/predict-%s-%s.pdf' % (savedir, saveas, which_predict),
format='pdf', bbox_inches='tight')
plt.savefig('%s/predict-%s-%s' % (savedir, saveas, which_predict), dpi=300,
bbox_inches='tight')
plt.close()
|
[
"chonloklei@gmail.com"
] |
chonloklei@gmail.com
|
9cf854e5f10787c00d66b46032ef3d8ea4d91943
|
8f021f68cd0949afa8d119582c0b419b014919d8
|
/URIOJ/uri2373.py
|
c142fff957d0f2e938944a791575fb1a31c7cdf5
|
[] |
no_license
|
Jonatankk/codigos
|
b9c8426c2f33b5142460a84337480b147169b3e6
|
233ae668bdf6cdd12dbc9ef243fb4ccdab49c933
|
refs/heads/master
| 2022-07-22T11:09:27.271029
| 2020-05-09T20:57:42
| 2020-05-09T20:57:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
# -*- coding: utf-8 -*-
# Leonardo Deliyannis Constantin
# URI 2373 - Garçom
def main():
N = int(input())
ans = 0
for _ in range(N):
L, C = map(int, input().split())
if L > C:
ans += C
print(ans)
if __name__ == '__main__':
while True:
try:
main()
except EOFError:
break
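
# Hedged worked example (not in the original): for N = 2 with pairs (5, 2) and
# (1, 4), only the first table has L > C, so the answer printed is 2.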
|
[
"constantin.leo@gmail.com"
] |
constantin.leo@gmail.com
|
13f374290b54460f585cc996dd27042b763b7bc7
|
46a5df524f1d96baf94f6eb0f6222f2b856235f3
|
/src/puzzle/problems/cryptogram_problem.py
|
c45547433c2d6fad7a436df5cd29d4b51b951e07
|
[
"MIT"
] |
permissive
|
PhilHarnish/forge
|
5dfbb0aa2afdb91e55d85187bd86fbeb9b6b2888
|
c544fb8b499e1e13793c94159f4c35bce187311e
|
refs/heads/master
| 2023-03-11T17:23:46.569359
| 2023-02-25T15:09:01
| 2023-02-25T15:09:01
| 1,818,598
| 2
| 0
|
MIT
| 2023-02-25T15:09:02
| 2011-05-29T19:36:53
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,120
|
py
|
from data import warehouse
from data.seek_sets import crypto_seek_set
from puzzle.problems import problem
# Humans will often choose a ROT value which is ~180 degrees away from A=A.
# For example: ROT13 is common and ROT1 or ROT25 are very uncommon.
_ROT_OFFSETS = list(sorted(range(1, 25), key=lambda i: abs(26 / 2 - i)))
_ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
_ALPHABET_UPPER = _ALPHABET.upper()
_ROT_TRANSLATIONS = [None] + [
str.maketrans(_ALPHABET, _ALPHABET[i:] + _ALPHABET[:i]) for i in range(1, 26)
]
# At least 1/5th of the words must convert.
_MIN_CONVERSION = 0.2
# Minimum threshold for an "interesting" translation.
_MIN_WORD_THRESHOLD = 45000
# Minimum number of characters to consider "translated".
_MIN_WORD = 3
# If Trie yields results greater than this per character it is "good".
_TARGET_WORD_SCORE_RATE = 200000000
class CryptogramProblem(problem.Problem):
def __init__(self, name, lines, **kwargs):
super(CryptogramProblem, self).__init__(name, lines, **kwargs)
_, self._words = _parse(lines)
@staticmethod
def score(lines):
# Look at all of the "words" in all lines.
tokens, words = _parse(lines)
if not words:
return 0 # Nothing to cryptogram.
if len(words) < len(tokens) // 2:
return 0 # Fewer than half of the tokens could be considered words.
# How many words appear to be gibberish?
known_words = warehouse.get('/words/unigram')
are_words = sum(word in known_words for word in words)
if are_words < len(words) // 8 + 1:
# Fewer than 1 in 8 of the original words are known.
return 1
# Something with 5+ of words *might* be a cryptic clue.
return max(0.0, 0.25 * (min(5, len(words)) / 5))
def _solve_iter(self):
# First attempt a rotN solve.
all_text = '\n'.join(self.lines)
good_match = False
for solution, weight in _generate_rot_n(all_text, self._words):
good_match = good_match or weight == 1
yield solution, weight
if good_match:
return
for solution in _generate_partitioned_cryptograms(all_text, self._words):
yield solution
def _solve(self):
raise NotImplementedError()
def _parse(lines):
tokens = ' '.join(lines).lower().split()
return tokens, list(filter(str.isalpha, tokens))
def _generate_rot_n(all_text, words):
for offset in _ROT_OFFSETS:
score = rot_n_score(words, offset)
if score > _MIN_CONVERSION:
solution = all_text.translate(_ROT_TRANSLATIONS[offset])
yield '%s (rot%s)' % (solution, offset), score
def rot_n_score(words, n):
""" Score `words` for rotation `n`.
:param words:
:param n:
:return: Returns 1 if every single word translates to a common word.
If all words are common score decreases proportional to chars translated.
If all translations are uncommon then
"""
unigrams = warehouse.get('/words/unigram')
    score = 0
    total = 0
    for word in words:
        l = len(word)
        if l < _MIN_WORD:
            continue
        translated = word.translate(_ROT_TRANSLATIONS[n])
        if translated in unigrams:
            word_weight = min(1, unigrams[translated] / _MIN_WORD_THRESHOLD)
            score += l * word_weight
        total += l
    # `total` (renamed from the builtin-shadowing `all`) can be 0 if every
    # word is shorter than _MIN_WORD; guard the division.
    return score / total if total else 0
def _generate_partitioned_cryptograms(all_text, words):
# Focus on the longest words.
sorted_words = sorted(set(words), key=lambda x: -len(x))
trie = warehouse.get('/words/unigram/trie')
# Note: This score currently includes whitespace etc.
target_score = len(all_text) * _TARGET_WORD_SCORE_RATE
for trans, score in _partitioned_cryptograms_from(sorted_words, [], trie):
yield all_text.translate(trans), min(1, score / target_score)
def _partitioned_cryptograms_from(crypto_words, normal_words, trie):
pos = len(normal_words)
end = len(crypto_words) - 1
translation = _make_translation(crypto_words, normal_words)
seek_set = crypto_seek_set.CryptoSeekSet(
crypto_words[pos], translation=translation)
for word, score in trie.walk(seek_set, exact_match=True):
normal_words.append(word)
if pos == end:
yield _make_solution_translation_table(translation, crypto_words[pos],
normal_words[pos]), score
else:
for solution, child_score in _partitioned_cryptograms_from(
crypto_words, normal_words, trie):
# Up the trampoline, accumulating score.
yield solution, score + child_score
normal_words.pop()
def _make_translation(crypto_words, normal_words):
translation = {}
for crypto_word, normal_word in zip(crypto_words, normal_words):
for crypto_c, normal_c in zip(crypto_word, normal_word):
if crypto_c in translation and translation[crypto_c] != normal_c:
raise IndexError('Inconsistent translation %s -> %s' % (
crypto_words, normal_words))
translation[crypto_c] = normal_c
return translation
def _make_solution_translation_table(translation, last_crypto, last_word):
table = str.maketrans(translation)
table.update(str.maketrans(last_crypto, last_word))
# Determine upper case letters too.
table.update(
str.maketrans(_ALPHABET_UPPER, _ALPHABET.translate(table).upper()))
return table
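
# Hedged standalone check of the ROT machinery above (independent of the
# warehouse data): ROT13 is tried first, and its table maps 'uryyb' to 'hello'.
if __name__ == '__main__':
    assert _ROT_OFFSETS[0] == 13
    assert 'uryyb'.translate(_ROT_TRANSLATIONS[13]) == 'hello'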
|
[
"philharnish@gmail.com"
] |
philharnish@gmail.com
|
57e952da8acde84cb4f927b0adec5f8de45dfaef
|
74aea619a499c6cba424a3f790f36315e647c55e
|
/Dynamo/src/RH Polyline to DS Polycurve.py
|
44f0bdff25cfc21bf7f28ae66f98043f0c7ac3c9
|
[] |
no_license
|
mostaphaRoudsari/MantisShrimp
|
ca7c38af196116877efbab397adc17446616a9c8
|
b8c70b1403103d60f85699608161476d628afede
|
refs/heads/master
| 2021-01-15T09:41:44.314306
| 2015-01-13T23:56:00
| 2015-01-13T23:56:00
| 24,969,552
| 2
| 0
| null | 2015-01-13T23:56:00
| 2014-10-09T02:06:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,334
|
py
|
#Copyright(c) 2014, Konrad Sobon
# @arch_laboratory, http://archi-lab.net
import clr
import sys
clr.AddReference('ProtoGeometry')
pyt_path = r'C:\Program Files (x86)\IronPython 2.7\Lib'
sys.path.append(pyt_path)
import os
appDataPath = os.getenv('APPDATA')
msPath = appDataPath + r"\Dynamo\0.7\packages\Mantis Shrimp\extra"
if msPath not in sys.path:
sys.path.Add(msPath)
possibleRhPaths = []
possibleRhPaths.append(r"C:\Program Files\Rhinoceros 5 (64-bit)\System\RhinoCommon.dll")
possibleRhPaths.append(r"C:\Program Files\Rhinoceros 5.0 (64-bit)\System\RhinoCommon.dll")
possibleRhPaths.append(r"C:\Program Files\McNeel\Rhinoceros 5.0\System\RhinoCommon.dll")
possibleRhPaths.append(msPath)
checkPaths = map(lambda x: os.path.exists(x), possibleRhPaths)
for i, j in zip(possibleRhPaths, checkPaths):
if j and i not in sys.path:
sys.path.Add(i)
clr.AddReferenceToFileAndPath(i)
from Autodesk.DesignScript.Geometry import *
import Rhino as rc
#The inputs to this node will be stored as a list in the IN variable.
dataEnteringNode = IN
rhObjects = IN[0]
_units = IN[1]
#unit conversion function from Rhino to DS
def toDSUnits(_units):
if _units == rc.UnitSystem.Millimeters:
return 0.001
elif _units == rc.UnitSystem.Centimeters:
return 0.01
elif _units == rc.UnitSystem.Decimeters:
return 0.1
elif _units == rc.UnitSystem.Meters:
return 1
elif _units == rc.UnitSystem.Inches:
return 0.0254
elif _units == rc.UnitSystem.Feet:
return 0.3048
elif _units == rc.UnitSystem.Yards:
return 0.9144
#3dPoint Conversion function
def rhPoint3dToPoint(rhPoint):
rhPointX = rhPoint.X * toDSUnits(_units)
rhPointY = rhPoint.Y * toDSUnits(_units)
rhPointZ = rhPoint.Z * toDSUnits(_units)
return Point.ByCoordinates(rhPointX, rhPointY, rhPointZ)
#poly curve conversion function
def rhCurveToPolyCurve(rhCurve):
ptArray = []
pCount = rhCurve.PointCount
for i in range(0, pCount):
dsPoint = rhPoint3dToPoint(rhCurve.Point(i))
ptArray.append(dsPoint)
dsPolyCurve = PolyCurve.ByPoints(ptArray)
del ptArray[:]
return dsPolyCurve
#convert rhino/gh geometry to ds geometry
dsPolyCurves = []
for i in rhObjects:
try:
i = i.Geometry
except:
pass
if i.ToString() == "Rhino.Geometry.PolylineCurve":
dsPolyCurves.append(rhCurveToPolyCurve(i))
#Assign your output to the OUT variable
OUT = dsPolyCurves
|
[
"ksobon1986@gmail.com"
] |
ksobon1986@gmail.com
|
919ca6f258d0ad24aa2cd1da271099356e257b9d
|
e72c9e619629f1b29066bd05f76232895fb3586e
|
/srcGetskl/ore_reverbJar.py
|
6ef2d57a123dbf070429020207d2c7f74366c809
|
[] |
no_license
|
sarveshsparab/FrED
|
b671e356b71b143396c0bc7e98544eb5b3c065a4
|
87dd75a576e8270085d182cf78baaa7ccab84357
|
refs/heads/master
| 2020-04-25T20:34:20.152512
| 2018-11-12T13:24:39
| 2018-11-12T13:24:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,066
|
py
|
import sys
import os
import time  # time.asctime() is used below but was never imported
sys.path.append("/home/yxqin/Scripts")
from strOperation import * # normRep normMen
####################################
#get relation skeletons from relation file(extracted by reverb.jar)
def getRelskl_fromRel(filename):
print "Processing " + filename
relFile = file(filename)
outputDir = os.path.split(filename)[0]
tStr = filename[-2:]
outputFile = file(outputDir + r"/relSkl_2013-01-" + tStr, "w")
lineIdx = 0
previousTid = tStr + "0"
previousText = ""
while 1:
lineStr = relFile.readline()
if len(lineStr) <= 0:
print str(lineIdx) + " lines are processed. End of file. " + str(time.asctime())
break
lineIdx += 1
arr = lineStr.split("\t")
relArr = []
#print arr
tid = tStr+arr[1]
arg1 = getArg(arr[-3])
rel = "_".join(arr[-2].split(" "))
arg2 = getArg(arr[-1][:-1])
conf = float(arr[11])
relArr.append(tid)
relArr.append(normRep(arg1))
relArr.append(normRep(rel))
relArr.append(normRep(arg2))
relArr.append(conf)
print relArr
text = "_".join(relArr[1:-1])
if tid != previousTid:
if len(previousText) > 1:
outputFile.write(previousTid + "\t" + previousText + "\n")
#print "## " + previousTid + " " + previousText
previousTid = tid
previousText = text
else:
previousText += (" "+text)
if lineIdx % 100000 == 0:
print "# tweets processed: " + str(lineIdx) + " at " + str(time.asctime())
outputFile.close()
relFile.close()
# normMen to be processed
def getArg(item):
if len(item) > 0:
return "_".join(normMen(item.split(" ")))
else:
return item
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage ore_reverbJar.py inputFileName"
else:
filename = sys.argv[1]
# extract skl from Relation file(reverb.jar)
getRelskl_fromRel(filename)
|
[
"qolina@gmail.com"
] |
qolina@gmail.com
|
e8026ecd42c8c44fa6417c976f50f828cc83b40e
|
5ca042838f15137130817b9e1766d8496a73d5db
|
/venv/bin/django-admin.py
|
f07913ab7e1689b81f250bf67a6114e95ec4e7ca
|
[
"MIT"
] |
permissive
|
Emmanuel-9/Neighbourhood
|
63257368e357adba3280f63a8f9d8ef77bcdfb23
|
f2635cbc00181da97bdf17dee283eb905db2ec55
|
refs/heads/master
| 2022-12-08T06:18:02.137084
| 2020-08-21T11:54:17
| 2020-08-21T11:54:17
| 288,374,723
| 0
| 1
| null | 2020-08-21T07:56:22
| 2020-08-18T06:32:40
|
HTML
|
UTF-8
|
Python
| false
| false
| 705
|
py
|
#!/home/joan_e/code/Moringa/core/django/Neighbourhood/venv/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
[
"joanevans18@gmail.com"
] |
joanevans18@gmail.com
|
90697d5b1e4c3cb7af501969295bfdaf846bf33f
|
708074835900ae623239ce3c0d1e6f948b799fd0
|
/ftp-1/ftp_server/bin/start.py
|
b5fa79aadef7fe47c9fffb6743a4175d84aeb3a0
|
[] |
no_license
|
hukeyy/learn_python
|
66688bcbaa43d79775030d2876979bbda08892ef
|
c71a37da88b089316536587ed47d32405bd987a3
|
refs/heads/master
| 2020-03-21T11:07:24.049328
| 2018-12-25T11:59:17
| 2018-12-25T11:59:17
| 138,490,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: hkey
import os, sys
BASE_DIR = os.path.dirname(os.getcwd())
sys.path.insert(0, BASE_DIR)
from modules import socket_server
from conf.settings import IP_PORT
if __name__ == '__main__':
server = socket_server.socketserver.ThreadingTCPServer((IP_PORT), socket_server.MyServer)
server.serve_forever()
|
[
"mickey.20@qq.com"
] |
mickey.20@qq.com
|
3a2837071c1a3bfbc6361ad134e368663c3f18d1
|
0a2cc497665f2a14460577f129405f6e4f793791
|
/sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/aio/operations/_backup_crr_job_details_operations.py
|
33a1fb75dc9c475d6bf5fbe74a51997aee684121
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
hivyas/azure-sdk-for-python
|
112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b
|
8b3258fa45f5dc25236c22ad950e48aa4e1c181c
|
refs/heads/master
| 2023-06-17T12:01:26.392186
| 2021-05-18T19:56:01
| 2021-05-18T19:56:01
| 313,761,277
| 1
| 1
|
MIT
| 2020-12-02T17:48:22
| 2020-11-17T22:42:00
|
Python
|
UTF-8
|
Python
| false
| false
| 4,994
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BackupCrrJobDetailsOperations:
"""BackupCrrJobDetailsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicesbackup.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
azure_region: str,
parameters: "_models.CrrJobRequest",
**kwargs
) -> "_models.JobResource":
"""Get CRR job details from target region.
Get CRR job details from target region.
:param azure_region: Azure region to hit Api.
:type azure_region: str
:param parameters: CRR Job request.
:type parameters: ~azure.mgmt.recoveryservicesbackup.models.CrrJobRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResource, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.models.JobResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-20"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'azureRegion': self._serialize.url("azure_region", azure_region, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CrrJobRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.NewErrorResponseAutoGenerated, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.RecoveryServices/locations/{azureRegion}/backupCrrJob'} # type: ignore
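    # Hypothetical usage sketch -- the client construction and attribute name below are
    # assumptions for illustration and are not defined in this file:
    #
    #   job = await client.backup_crr_job_details.get(
    #       azure_region="westus2",
    #       parameters=_models.CrrJobRequest(resource_id="...", job_name="..."),
    #   )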
|
[
"noreply@github.com"
] |
hivyas.noreply@github.com
|
ea9cae42173bae1b6fd88abe2e029323cb284b9b
|
fe0017ae33385d7a2857d0aa39fa8861b40c8a88
|
/env/lib/python3.8/site-packages/sklearn/manifold/spectral_embedding_.py
|
efa8372ddb519ebbe9f08ab3616ad83b8ebd6fad
|
[] |
no_license
|
enriquemoncerrat/frasesback
|
eec60cc7f078f9d24d155713ca8aa86f401c61bf
|
e2c77f839c77f54e08a2f0930880cf423e66165b
|
refs/heads/main
| 2023-01-03T23:21:05.968846
| 2020-10-18T21:20:27
| 2020-10-18T21:20:27
| 305,198,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 601
|
py
|
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys
# mypy error: Module X has no attribute y (typically for C extensions)
from . import _spectral_embedding # type: ignore
from ..externals._pep562 import Pep562
from ..utils.deprecation import _raise_dep_warning_if_not_pytest
deprecated_path = 'sklearn.manifold.spectral_embedding_'
correct_import_path = 'sklearn.manifold'
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)
def __getattr__(name):
return getattr(_spectral_embedding, name)
if not sys.version_info >= (3, 7):
Pep562(__name__)
|
[
"enriquemoncerrat@gmail.com"
] |
enriquemoncerrat@gmail.com
|
22148b082fd05a86e771c13e027a987707f444a9
|
8f6cc0e8bd15067f1d9161a4b178383e62377bc7
|
/__PPPPLLLLTTTT__LLLLOOOOGGGG/workspace/a3c/PPPPPPLLLLLLLOOOOOOTTTTTTT/trainer-family/plot_a3c_log3.py
|
813665c694efb63fdab1081416eb186ed9292934
|
[] |
no_license
|
humorbeing/python_github
|
9c4dfc61a3cefbb266fefff335f6b28d05797e5e
|
e4b4b49bee7e7e3843c6874717779ce8d619bd02
|
refs/heads/master
| 2023-01-22T21:51:20.193131
| 2020-01-26T21:47:23
| 2020-01-26T21:47:23
| 163,707,778
| 0
| 0
| null | 2022-12-27T15:37:48
| 2019-01-01T01:58:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,330
|
py
|
import numpy as np
import matplotlib.pyplot as plt
def xy(name, num=None):
with open(name) as f:
lines = [line for line in f]
log = []
count = 0
step_stack = []
reward_stack = []
for line in lines:
count += 1
# if count % 500 == 0:
reads = line.split(',')
reads = [cleanse.strip() for cleanse in reads]
step_line = reads[1]
reward_line = reads[3]
# print(step_line)
# print(reward_line)
step_line = step_line.split(' ')
step_num = int(step_line[2])
# print(step_num)
# print(step_num+1)
reward_line = reward_line.split(' ')
# print(reward_line)
reward_num = float(reward_line[2])
# print(reward_num)
# print(reward_num+0.2)
# step_stack.append(step_num)
# reward_stack.append(reward_num)
log.append([step_num, reward_num])
# print('num raw data', count)
log = np.array(log)
# print(log.shape)
log = log[log[:, 0].argsort()]
# if count > 5000:
# break
# print(log)
logs = []
step_stack = []
reward_stack = []
if num is None:
num = 50
for count in range(len(log)):
# print(log[count])
step_stack.append(log[count][0])
reward_stack.append(log[count][1])
if count % num == 0:
s = np.mean(step_stack)
r = np.mean(reward_stack)
logs.append([s, r])
step_stack = []
reward_stack = []
log = np.array(logs)
# print(log.shape)
# print(log)
# log.sort(axis=0)
# print(log.shape)
# print(log.shape)
# print(log)
t_log = np.transpose(log)
# print(t_log.shape)
# print(t_log)
x = t_log[0]
y = t_log[1]
return x, y
def plot_this(file_name, plot_name, color=None, num=None):
x, y = xy(file_name, num=num)
ax.plot(x, y, label=plot_name, color=color)
# def plot_these(file_names, plot_name, color=None, num=None):
# xs =
# ys =
# plt.plot(x, y)
# plt.scatter(t_log[0], t_log[1])
fig, ax = plt.subplots()
# plot_this('test_log.txt', 'A3C')
# plot_this('a3c-1.txt', 'A3C')
# plot_this('a3c-200.txt', 'A3C')
# plot_this('a3c-500.txt', 'A3C')
plot_this('dmb-all.txt', 'DMB(Our)', 'r', 100)
plot_this('a3c-all.txt', 'A3C(Baseline)', 'g', 60)
# plot_this('dmb-freeze-all.txt', 'DMB(our), Freeze weight', 'r', num=60)
# plot_this('a3c-1-fre.txt', 'A3C, Freeze Weight')
# plot_this('a3c-all2.txt', 'A3C2-all')
plot_this('en-1.txt', 'Autoencoder', num=40)
# plot_this('en-fre.txt', 'AutoEncoder, Freeze weight')
# plot_this('g1-1.txt', '1')
# plot_this('g1-2-fre.txt', 'g2')
# plot_this('g1-200.txt', '2')
# plot_this('g1-500.txt', '3')
# plot_this('g1-1000.txt', '4')
# plot_this('g2-1.txt', '5')
# plot_this('g2-1000.txt', '6')
# plot_this('soso-1.txt', '7')
# plot_this('soso-1-fre.txt', '1000A3C')
# plot_this('soso-200.txt', '8')
# plot_this('soso-500.txt', '9')
# plot_this('mb-1.txt', 'Encoder')
# plot_this('mb-1-fre.txt', 'Model-Based, Freeze weight', 'y')
plot_this('mb-1000.txt', 'Model-based', num=60)
ax.grid(True)
ax.legend(loc='upper left')
ax.set_title('Pong-ram-v0')
ax.set_xlabel('Frame')
ax.set_ylabel('Episodic Reward')
ax.set_xlim(left=0, right=5000000*6)
ax.set_ylim(bottom=-22, top=-4)
plt.show()
|
[
"geemguang@gmail.com"
] |
geemguang@gmail.com
|
29a98f623108212df0f2f2577f6c897f848ea3db
|
5ccfa68d2e26facc7dd51a65bdb80d3372903365
|
/adventofcode.com/utils/color.py
|
62053fdefbb2c92149bccb26a3ed5c8c19c363b9
|
[] |
no_license
|
ceasaro/ceasaro_py
|
71b93165a48dce48b027c2a3c727f6fdeaf62f0f
|
a3e2c02140e2abc165cc522756a9bce1f422a9e2
|
refs/heads/master
| 2023-06-25T20:17:33.826726
| 2023-06-12T11:38:07
| 2023-06-12T11:38:07
| 12,404,093
| 0
| 0
| null | 2023-05-22T22:36:36
| 2013-08-27T11:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 631
|
py
|
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
LIGHT_GRAY = 37
GRAY = 90
LIGHT_RED = 91
LIGHT_GREEN = 92
LIGHT_YELLOW = 93
LIGHT_BLUE = 94
LIGHT_MAGENTA = 95
LIGHT_CYAN = 96
WHITE = 97
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def color_str(color, msg):
return f"\033[{color}m{msg}{ENDC}"
def background_color_str(color, msg):
return f"\033[{color+10}m{msg}{ENDC}"
def red(msg):
return color_str(RED, msg)
def light_red(msg):
return color_str(LIGHT_RED, msg)
def yellow(msg):
return color_str(YELLOW, msg)
def green(msg):
return color_str(GREEN, msg)
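# Small usage demo, assuming a terminal that honors ANSI escape sequences:
if __name__ == "__main__":
    print(red("error"), yellow("warning"), green("ok"))
    print(background_color_str(CYAN, "highlight"))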
|
[
"ceesvw@gmail.com"
] |
ceesvw@gmail.com
|
52cc00f46994e1856562d91ec95b2bb010b70b6d
|
d0cb58e1658d4b5b88bdc07e497dc8092707ae02
|
/2020/08August/24PandasDataFrame13.py
|
0e993fe386a578073837a0851e831148ec09f103
|
[] |
no_license
|
June-fu/python365
|
27f9b753d38ade549d59aa8f2d8bda0fb8b1e20c
|
242033a4b644a7566fbfa4dba9b60f60aa31fe91
|
refs/heads/master
| 2021-07-02T21:42:28.454091
| 2021-05-04T15:08:44
| 2021-05-04T15:08:44
| 233,629,713
| 0
| 0
| null | 2020-01-13T15:52:58
| 2020-01-13T15:36:53
| null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
#!/usr/bin/python
'''
# @ Author: june-fu
# @ Create Time: 2020-12-20 21:49:51
# @ Modified by: june-fu
# @ Modified time: 2020-12-20 22:09:47
# @ Description:
calculate the sum of the examination attempts by the students.
'''
import pandas as pd
import numpy as np
dct1 = {'name': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],
'score': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],
'attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
'qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
df = pd.DataFrame(dct1, index=labels)
print(df)
print(df['attempts'].sum())
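# Worked check: the 'attempts' column is [1, 3, 2, 3, 2, 3, 1, 1, 2, 1], so the sum printed above is 19.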
|
[
"fujun1990@gmail.com"
] |
fujun1990@gmail.com
|
80ca915f03a9b34dea6395041cf2e15786e0e031
|
91ff6fdf7b2ccc58869d6ad41842f230644952c1
|
/ultratech_core/migrations/0001_initial.py
|
4860a82e6b68f5421ad191cac9812d4f5cdc2779
|
[] |
no_license
|
KONASANI-0143/Dev
|
dd4564f54117f54ccfa003d1fcec4220e6cbe1f9
|
23d31fbeddcd303a7dc90ac9cfbe2c762d61c61e
|
refs/heads/master
| 2023-08-14T15:59:59.012414
| 2021-10-13T14:54:49
| 2021-10-13T15:10:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 960
|
py
|
# Generated by Django 3.0.6 on 2020-05-21 06:14
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Preferences',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('key', models.CharField(max_length=50)),
('value', models.CharField(max_length=10000)),
('description', models.TextField()),
],
options={
'verbose_name': 'Ultra Tech Preference',
'verbose_name_plural': 'Ultra Tech Preferences',
'db_table': 'ultra_tech_preferences',
},
),
]
|
[
"harinadhareddypython@gmail.com"
] |
harinadhareddypython@gmail.com
|
6d48e276f58017bdc4f368b3f92159159624a379
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/detection/SOLOv2/mmcv/mmcv/utils/__init__.py
|
f75168a74ed5087b90edea3d504a8a1a673eba24
|
[
"GPL-1.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Open-MMLab. All rights reserved.
from .config import Config, ConfigDict
from .misc import (check_prerequisites, concat_list, is_list_of, is_seq_of,
is_str, is_tuple_of, iter_cast, list_cast,
requires_executable, requires_package, slice_list,
tuple_cast)
from .path import (FileNotFoundError, check_file_exist, fopen, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .timer import Timer, TimerError, check_time
__all__ = [
'ConfigDict', 'Config', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'FileNotFoundError', 'ProgressBar', 'track_progress',
'track_iter_progress', 'track_parallel_progress', 'Timer', 'TimerError',
'check_time'
]
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
434f96c146f909870b57c3cc98deab45416581e2
|
71acb7214efd91c0d327f6d8958e1798eadb4401
|
/locations/spiders/costa_coffee_gg_gb_im_je.py
|
a4e2022daf006b70b9e6b2c80b60d7fc022e2027
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
alltheplaces/alltheplaces
|
21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
|
1bcbb55cfcf06f2c714465570711f6e83f205c22
|
refs/heads/master
| 2023-08-30T19:45:35.098658
| 2023-08-30T17:51:54
| 2023-08-30T17:51:54
| 61,166,935
| 453
| 176
|
NOASSERTION
| 2023-09-14T17:16:40
| 2016-06-15T01:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,603
|
py
|
from scrapy import Spider
from scrapy.http import JsonRequest
from locations.categories import Categories, Extras, apply_category, apply_yes_no
from locations.dict_parser import DictParser
from locations.geo import point_locations
from locations.hours import OpeningHours
class CostaCoffeeGGGBIMJESpider(Spider):
name = "costa_coffee_gg_gb_im_je"
item_attributes = {"brand": "Costa Coffee", "brand_wikidata": "Q608845"}
allowed_domains = ["www.costa.co.uk"]
start_urls = ["https://www.costa.co.uk/api/mdm/"]
custom_settings = {"ROBOTSTXT_OBEY": False} # No robots.txt. 404 HTML page returned instead.
def start_requests(self):
graphql_query_template = """query Sites {
sites(
siteStatuses: ["OPEN"]
tradingStatusAvailable: true
geo: {
latitude: __LATITUDE__
longitude: __LONGITUDE__
}
countries: "GB"
orderBy: { distance: ASC }
first: 2500
) {
items {
id
extendedName: name
location {
address {
address1
address2
city
postCode
}
geo {
latitude
longitude
distanceMiles
}
}
siteType
facilities {
babyChanging
clickAndServe
coffeeClub
collect
delivery
disabledAccess
disabledWC
driveThru
giftCard
preOrderCollect
tooGoodToGo
wifi
}
expressMachines {
characteristics {
icedDrinks
}
}
operatingHours(timeTypes: ["Standard"]) {
Monday: monday {
open24Hours
open
close
}
Tuesday: tuesday {
open24Hours
open
close
}
Wednesday: wednesday {
open24Hours
open
close
}
Thursday: thursday {
open24Hours
open
close
}
Friday: friday {
open24Hours
open
close
}
Saturday: saturday {
open24Hours
open
close
}
Sunday: sunday {
open24Hours
open
close
}
}
name: knownAs
}
}
}"""
for lat, lon in point_locations("gg_gb_im_je_centroids_iseadgg_50km_radius.csv"):
graphql_query = graphql_query_template.replace("__LATITUDE__", str(lat)).replace("__LONGITUDE__", str(lon))
yield JsonRequest(url=self.start_urls[0], data={"query": graphql_query})
def parse(self, response):
for location in response.json()["data"]["sites"]["items"]:
item = DictParser.parse(location)
if location["siteType"] == "Global Express":
item["brand"] = "Costa Express"
item["brand_wikidata"] = "Q113556385"
apply_category(Categories.VENDING_MACHINE_COFFEE, item)
else:
apply_category(Categories.COFFEE_SHOP, item)
item["lat"] = location["location"]["geo"]["latitude"]
item["lon"] = location["location"]["geo"]["longitude"]
item["street_address"] = ", ".join(
filter(None, [location["location"]["address"]["address1"], location["location"]["address"]["address2"]])
)
item["city"] = location["location"]["address"]["city"]
item["postcode"] = location["location"]["address"]["postCode"]
if item["postcode"]:
if item["postcode"][:2] == "GY":
item["country"] = "GG"
elif item["postcode"][:2] == "IM":
item["country"] = "IM"
elif item["postcode"][:2] == "JE":
item["country"] = "JE"
else:
item["country"] = "GB"
if len(location["operatingHours"]) > 0:
item["opening_hours"] = OpeningHours()
for day_name, day_hours in location["operatingHours"][0].items():
if day_hours["open24Hours"]:
item["opening_hours"].add_range(day_name, "00:00", "24:00")
else:
item["opening_hours"].add_range(day_name, day_hours["open"], day_hours["close"])
apply_yes_no(Extras.BABY_CHANGING_TABLE, item, location["facilities"].get("babyChanging"), False)
apply_yes_no(Extras.DELIVERY, item, location["facilities"].get("delivery"), False)
apply_yes_no(Extras.WHEELCHAIR, item, location["facilities"].get("disabledAccess"), False)
apply_yes_no(Extras.TOILETS_WHEELCHAIR, item, location["facilities"].get("disabledWC"), False)
apply_yes_no(Extras.DRIVE_THROUGH, item, location["facilities"].get("driveThru"), False)
apply_yes_no(Extras.WIFI, item, location["facilities"].get("wifi"), False)
yield item
|
[
"noreply@github.com"
] |
alltheplaces.noreply@github.com
|
7128fdf457058ab4d21c61a581c11328e4ce0797
|
02c394db353d996038c9bedbeaf91bb080c12ca2
|
/dsm/epaxos/inst/store.py
|
28229c7aa18c70adcccb274eb860057ec8542551
|
[
"MIT"
] |
permissive
|
Limber0117/python-epaxos
|
0633752cffaca65c0d8b9c3aecf9c8bc6ca70f3e
|
e68bab50e7df32770103196c91d8708863691579
|
refs/heads/master
| 2021-08-23T22:31:47.283682
| 2017-12-06T22:16:21
| 2017-12-06T22:16:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,719
|
py
|
import logging
from typing import NamedTuple, Dict, Optional, Tuple
from uuid import UUID
from dsm.epaxos.cmd.state import CommandID
from dsm.epaxos.inst.deps.cache import KeyedDepsCache
from dsm.epaxos.inst.state import State, Ballot, Slot, Stage
class InstanceStoreState(NamedTuple):
ballot: Ballot
state: State
def __repr__(self):
return f'ISS({self.ballot},{self.state})'
logger = logging.getLogger(__name__)
class TransitionException(Exception):
    def __init__(self, slot: Slot, curr_inst: Optional[InstanceStoreState], new_inst: Optional[InstanceStoreState]):
        self.slot = slot
        self.inst = curr_inst
        self.new_inst = new_inst  # keep the rejected new state available for diagnostics
class IncorrectBallot(TransitionException):
pass
class IncorrectStage(TransitionException):
pass
class IncorrectCommand(TransitionException):
pass
class SlotTooOld(TransitionException):
pass
class LoadResult(NamedTuple):
exists: bool
inst: InstanceStoreState
def between_checkpoints(old, new):
for x in new.keys():
max_slot = new.get(x, Slot(x, 0))
low_slot = old.get(x, Slot(x, 0))
for y in range(low_slot.instance_id, max_slot.instance_id):
yield Slot(x, y)
CP_T = Dict[int, Slot]
class CheckpointCycle:
def __init__(self):
# [ old ][ mid ][ current ]
self.cp_old = {} # type: CP_T
self.cp_mid = {} # type: CP_T
def earlier(self, slot: Slot):
return slot < self.cp_old.get(slot.replica_id, Slot(slot.replica_id, -1))
def cycle(self, cp: Dict[int, Slot]) -> Tuple[CP_T, CP_T]:
"""
:param cp: new checkpoint
:return: range of the recycled checkpoint
"""
cp_prev_old = self.cp_old
cp_prev_mid = self.cp_mid
cp_old = {**self.cp_old, **self.cp_mid}
cp_mid = {**self.cp_mid, **cp}
self.cp_old = cp_old
self.cp_mid = cp_mid
return cp_prev_old, cp_prev_mid
def __repr__(self):
o = sorted(self.cp_old.items())
m = sorted(self.cp_mid.items())
return f'CheckpointCycle({o}, {m})'
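    # Minimal usage sketch (values are illustrative; assumes Slot orders by instance_id):
    #
    #   cp = CheckpointCycle()
    #   cp.cycle({0: Slot(0, 5)})   # first rotation: the returned (old, mid) pair is ({}, {})
    #   cp.cycle({0: Slot(0, 9)})   # returns the prior ({}, {0: Slot(0, 5)}) pair for recycling
    #   cp.earlier(Slot(0, 3))      # True -- Slot(0, 5) has now rotated into cp_old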
class InstanceStore:
def __init__(self):
self.inst = {} # type: Dict[Slot, InstanceStoreState]
self.cmd_to_slot = {} # type: Dict[CommandID, Slot]
self.deps_cache = KeyedDepsCache()
self.cp = CheckpointCycle()
def set_cp(self, cp: Dict[int, Slot]):
for slot in between_checkpoints(*self.cp.cycle(cp)):
if slot in self.inst:
assert self.inst[slot].state.stage == Stage.Committed, 'Attempt to checkpoint before Commit'
del self.inst[slot]
def load(self, slot: Slot):
if self.cp.earlier(slot):
raise SlotTooOld(slot, None, None)
r = self.inst.get(slot)
exists = True
if r is None:
exists = False
r = InstanceStoreState(
slot.ballot_initial(),
State(
Stage.Prepared,
None,
-1,
[]
)
)
return LoadResult(exists, r)
def load_cmd_slot(self, id: CommandID) -> Optional[Tuple[Slot, InstanceStoreState]]:
r = self.cmd_to_slot.get(id)
if not r:
return None
else:
return r, self.load(r).inst
def update(self, slot: Slot, new: InstanceStoreState):
exists, old = self.load(slot)
if new.ballot < old.ballot:
raise IncorrectBallot(slot, old, new)
if new.state.stage < old.state.stage:
raise IncorrectStage(slot, old, new)
if old.state.stage > Stage.PreAccepted and old.state.command is not None and old.state.command != new.state.command:
raise IncorrectCommand(slot, old, new)
if new.state.stage == Stage.PreAccepted and new.state.command:
# rethink the command ordering
seq, deps = self.deps_cache.xchange(slot, new.state.command)
upd = InstanceStoreState(
new.ballot,
State(
new.state.stage,
new.state.command,
max(seq, new.state.seq),
sorted(set(new.state.deps + deps))
)
)
else:
upd = new
self.inst[slot] = upd
if exists and old.state.command:
if old.state.command.id in self.cmd_to_slot:
del self.cmd_to_slot[old.state.command.id]
else:
logger.error(f'Command id {old.state.command} not found in self.cmd_to_slot')
if new.state.command:
self.cmd_to_slot[new.state.command.id] = slot
return old, upd
|
[
"acizov@gmail.com"
] |
acizov@gmail.com
|
42214b04b82a14043926515e9a9d0b506da81f74
|
98a359465e6e0620accede5b87b819aed663179d
|
/schol_library/migrations/0167_auto_20200526_1433.py
|
0bcdcd0a3380ba4d95d05f24f178d1bf85eff7eb
|
[] |
no_license
|
mustavfaa/back-end
|
88f8674bd6c2f8d0c4984a2a3d34f2aece3ec8d1
|
6635e8f504c7a7ba9709121b4dd8d5ccecdf05ca
|
refs/heads/main
| 2023-08-15T10:48:03.461138
| 2021-09-27T15:26:03
| 2021-09-27T15:26:03
| 410,938,832
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
# Generated by Django 2.2 on 2020-05-26 08:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('schol_library', '0166_auto_20200526_1432'),
]
operations = [
migrations.AlterUniqueTogether(
name='checkidrequestedition',
unique_together=set(),
),
migrations.AlterUniqueTogether(
name='editionbooksorder',
unique_together=set(),
),
migrations.AlterUniqueTogether(
name='numberbooks',
unique_together=set(),
),
]
|
[
"72229762+mustavfaa@users.noreply.github.com"
] |
72229762+mustavfaa@users.noreply.github.com
|
4009119be865df166884eaf6f38adf6113478806
|
3f1dab410b11b1f7b3979a2436bcc099edf3b9c1
|
/src/graph_transpiler/webdnn/frontend/chainer/converter.py
|
d08dd2f7ae6a742adb81b3763fa4cf68b5489aeb
|
[
"Zlib",
"MIT"
] |
permissive
|
qifu99/webdnn
|
c7386ee3db3adbb718e9c71771a77ffe839b892f
|
dbf6c22e2555988d098575595cbc37fc042bc713
|
refs/heads/master
| 2021-01-19T08:04:27.598406
| 2017-08-17T01:17:06
| 2017-08-17T01:17:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,401
|
py
|
# -*- coding:utf-8 -*-
"""
Chainer Link -> Graph object converters
Assuming Chainer 1.23 or 2.0
"""
import warnings
from typing import List, Union, Sequence, Set
from chainer import Function
from webdnn.frontend.constraints import AxisVar
from webdnn.frontend.converter import Converter
from webdnn.graph.graph import Graph
from webdnn.graph.order import Order
from webdnn.graph.variable import Variable
from webdnn.graph.variables.constant_variable import ConstantVariable
from webdnn.util import console
FLAG_CHAINER_INSTALLED = False
try:
import chainer
import chainer.computational_graph
if chainer.__version__ >= "2.":
chainer_v2 = True
# noinspection PyUnresolvedReferences
VariableNode = chainer.variable.VariableNode
else:
chainer_v2 = False
VariableNode = chainer.variable.Variable
FLAG_CHAINER_INSTALLED = True
except ImportError as e:
console.debug("Chainer is not completely installed.")
pass
def _to_variable_node(chainer_variable: Union["chainer.Variable", "VariableNode"]) -> "VariableNode":
if chainer_v2 and not isinstance(chainer_variable, VariableNode):
# noinspection PyUnresolvedReferences
return chainer_variable.node
else:
# noinspection PyTypeChecker
return chainer_variable
def _listup_functions(inputs: Sequence["VariableNode"], outputs: Sequence["VariableNode"]):
stack = list(outputs) # type: List[Union[VariableNode, Function]]
resolved = set(inputs) # type: Set[Union[VariableNode, Function]]
result = [] # type: List[Function]
while len(stack) > 0:
node = stack.pop(0)
if node in resolved:
continue
if isinstance(node, VariableNode):
prev_nodes = [] if node.creator is None else [node.creator]
else:
prev_nodes = node.inputs
unresolved_prevs = [prev_node for prev_node in prev_nodes if prev_node not in resolved]
if len(unresolved_prevs) == 0:
resolved.add(node)
if isinstance(node, Function):
result.append(node)
else:
stack.append(node)
stack += unresolved_prevs
return result
class ChainerConverter(Converter["Function"]):
"""ChainerConverter()
"""
def __init__(self):
if not FLAG_CHAINER_INSTALLED:
raise ImportError("ImportError is occurred when chainer is loaded.")
def convert_from_inout_vars(self, inputs: List["chainer.Variable"], outputs: List["chainer.Variable"]):
"""convert_from_inout_vars(inputs, output)
Construct computational graph from input and output chainer variables, and convert the graph into WebDNN IR.
Args:
inputs(list of chainer.Variable): input chainer variables
outputs(list of chainer.Variable): output chainer variables
.. warning::
This method will be removed in the future version. Use :func:`~webdnn.frontend.chainer.ChainerConverter.convert(inputs,
outputs)`.
.. admonition:: Example
.. code::
model = chainer.links.model.vision.resnet.ResNet50Layers()
# Forward propagation with dummy input to build computational graph
x = chainer.Variable(np.empty((1, 3, 224, 224), dtype=np.float32))
y = model(x, layers=["fc6"])["fc6"]
graph = ChainerConverter().convert_from_inout_vars([x], [y])
Returns:
(:class:`~webdnn.Graph`): WebDNN Graph
"""
warnings.warn("This method will be removed in the future version. Use ChainerConverter#convert(inputs, outputs).",
DeprecationWarning)
return self.convert(inputs, outputs)
def convert(self, inputs: List["chainer.Variable"], outputs: List["chainer.Variable"]) -> Graph:
"""convert(inputs, outputs)
Convert chainer computational graph into WebDNN IR.
Args:
inputs(list of chainer.Variable): input chainer variables
outputs(list of chainer.Variable): output chainer variables
.. admonition:: Example
.. code::
model = chainer.links.model.vision.resnet.ResNet50Layers()
# Forward propagation with dummy input to build computational graph
x = chainer.Variable(np.empty((1, 3, 224, 224), dtype=np.float32))
y = model(x, layers=["fc6"])["fc6"]
                graph = ChainerConverter().convert([x], [y])
Returns:
(:class:`~webdnn.Graph`): WebDNN Graph
"""
chainer_graph = chainer.computational_graph.build_computational_graph(outputs)
# In chainer v2, variables are represented as Variable and VariableNode object, and
# graph information such as edge connection is contained in variable node.
# Therefore all chainer variable must be normalized into variable node.
c_vars = list(map(_to_variable_node,
filter(lambda v: isinstance(v, VariableNode), chainer_graph.nodes))) # type: List[VariableNode]
inputs = [_to_variable_node(v) for v in inputs]
outputs = [_to_variable_node(v) for v in outputs]
for c_var in c_vars:
if c_var.creator is None:
# If :code:`creator is None` and it's not input variable, it's parameter.
self._convert_var(c_var, constant=c_var not in inputs)
for c_opr in _listup_functions(inputs, outputs):
self._convert_operator(c_opr)
graph = Graph([self.get_variable(c_var) for c_var in inputs],
[self.get_variable(c_var) for c_var in outputs])
return graph
def _convert_var(self, c_var: "VariableNode", constant=False):
assert not self.has_variable(c_var), f"{c_var} is already converted"
ndim = len(c_var.shape)
order = Order([AxisVar() for _ in range(ndim)])
if constant:
data = c_var.data
if chainer_v2 and data is None:
# noinspection PyProtectedMember
data = c_var._variable().data
n_var = ConstantVariable(chainer.cuda.to_cpu(data), order) # force on CPU
else:
n_var = Variable(c_var.shape, order)
self.set_variable(c_var, n_var)
return n_var
|
[
"y.kikura@gmail.com"
] |
y.kikura@gmail.com
|
30197389acb3578590648bb805b98e79d74595bc
|
573220da9574b1ca16b530b93eb6801838b38ee5
|
/app.py
|
d18c2cd5260fb14e64a22287e9ee9df08a46bb0b
|
[] |
no_license
|
nova-sangeeth/Flask__blog__main
|
0810879d9ed09940e334f0fa5827c74acbcd5dfd
|
15b95f180608d051e3deb4aaf8f3a4889fc3d381
|
refs/heads/master
| 2020-09-14T03:21:22.758857
| 2020-06-18T17:55:25
| 2020-06-18T17:55:25
| 223,000,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,186
|
py
|
from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
app = Flask(__name__)
app.config[
"SQLALCHEMY_DATABASE_URI"
] = "sqlite:///blog.db" # /// means a relative path, //// means it is a absolute path.
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
# creating the database file
db = SQLAlchemy(app)
# models ------ classes
class BlogPost(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(256), nullable=False) # this is a required field.
content = db.Column(db.Text, nullable=False) # this is a required field.
author = db.Column(db.String(128), nullable=False, default="N/A")
date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
def __repr__(self):
return "Blog Post" + str(self.id)
# model for the user authentication
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100), unique=True)
name = db.Column(db.String(100))
@app.route("/")
def home():
return render_template("home.html")
@app.route("/posts", methods=["GET", "POST"])
def posts():
if request.method == "POST":
post_title = request.form["title"]
post_content = request.form["content"]
post_author = request.form["author"]
new_post = BlogPost(title=post_title, content=post_content, author=post_author)
        db.session.add(new_post)  # session.add only stages the new object in the session.
        db.session.commit()  # commit persists the staged changes to the database.
return redirect("/posts")
else:
all_posts = BlogPost.query.order_by(BlogPost.date_created).all()
return render_template("posts.html", posts=all_posts)
@app.route("/posts/delete/<int:id>")
def delete(id):
post = BlogPost.query.get_or_404(id)
db.session.delete(post)
db.session.commit()
return redirect("/posts")
@app.route("/posts/edit/<int:id>", methods=["GET", "POST"])
def edit(id):
post = BlogPost.query.get_or_404(id)
if request.method == "POST":
post.title = request.form["title"]
post.author = request.form["author"]
post.content = request.form["content"]
db.session.commit()
return redirect("/posts")
else:
return render_template("edit.html", post=post)
@app.route("/posts/new", methods=["GET", "POST"])
def new_post():
if request.method == "POST":
post_title = request.form["title"]
post_content = request.form["content"]
post_author = request.form["author"]
new_post = BlogPost(title=post_title, content=post_content, author=post_author)
        db.session.add(new_post)  # session.add only stages the new object in the session.
        db.session.commit()  # commit persists the staged changes to the database.
return redirect("/posts")
else:
all_posts = BlogPost.query.order_by(BlogPost.date_created).all()
return render_template("new_post.html")
if __name__ == "__main__":
app.run(debug=True)
|
[
"novasangeeth@outlook.com"
] |
novasangeeth@outlook.com
|
871bd249f549b78762891781da0e7e74582e8d83
|
80be1fa4b81a78e1afafe5092634e8dc318010a9
|
/yoggsaron/models/__init__.py
|
cffbbc69dc45919f05d008a10e9f275d9d8c1321
|
[] |
no_license
|
tonicbupt/c-thun
|
10ad152e0ce034a1857c8f8d53041fae53cce3ab
|
e851bfc4fd3e733e038f6ceea90f7b347c3e77cc
|
refs/heads/master
| 2021-01-22T05:28:22.364857
| 2014-08-26T15:30:18
| 2014-08-26T15:30:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# coding: utf-8
from .dish import Dish
from .order import OrderDish, Order
from .restaurant import Restaurant
__all__ = ['Dish', 'OrderDish', 'Order', 'Restaurant']
|
[
"tonicbupt@gmail.com"
] |
tonicbupt@gmail.com
|
b4cb7d1748afcca8c61756c46770aaefb3bc8952
|
8b6cd902deb20812fba07f1bd51a4460d22adc03
|
/back-end/.history/djreact/users/serializers_20191221131333.py
|
1e1bb2bcff5539a9496bdade983d74e2f31a8f90
|
[] |
no_license
|
vishaldenzil/Django-react-
|
f3a49d141e0b6882685b7eaa4dc43c84857f335a
|
35b6d41f6dacb3bddcf7858aa4dc0d2fe039ff98
|
refs/heads/master
| 2022-11-08T09:27:02.938053
| 2020-05-29T04:53:52
| 2020-05-29T04:53:52
| 267,768,028
| 0
| 1
| null | 2022-10-15T14:08:30
| 2020-05-29T04:52:20
|
Python
|
UTF-8
|
Python
| false
| false
| 191
|
py
|
from rest_framework import serializers
from .models import User
class UserRegistrationSerializer(serializers.ModelSerializer):
class Meta:
model = User
        fields = '__all__'
|
[
"vishal.denzil@ezedox.com"
] |
vishal.denzil@ezedox.com
|
f13c714b2b66ea392c8848f14cedd68993446987
|
852a91492a737e9a2c210df883029b684ca6a448
|
/jade2/basic/structure/biopython_util.py
|
e507bcba04a299110c4659211203f75c5c837ef4
|
[
"BSD-3-Clause"
] |
permissive
|
jadolfbr/jade2
|
cb33f4a8cbf560f5ebaef4de2789ff50f372ff5a
|
91f18d6004f123d11ea8618171154aa25a7444e9
|
refs/heads/main
| 2022-09-12T06:23:23.356864
| 2022-03-24T20:15:13
| 2022-03-24T20:15:13
| 427,541,475
| 0
| 0
|
NOASSERTION
| 2021-11-13T01:34:34
| 2021-11-13T01:34:34
| null |
UTF-8
|
Python
| false
| false
| 4,320
|
py
|
import gzip
import os
import sys
import logging
from pathlib import Path
from typing import Union, List
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.MMCIFParser import MMCIFParser
from Bio.PDB.Residue import Residue
from Bio.PDB.Structure import Structure
from Bio.PDB.Chain import Chain
from Bio.PDB.Model import Model
from jade2.basic.restype_definitions import RestypeDefinitions
from jade2.basic.numeric import *
### NOTE: All Utility function have been replaced by a Bio Structure wrapper: BioPose.
### Please see this new class for future developments!
######## NEW Biopython utility functions ##########
def is_connected_to_next(res1:Residue, res2: Residue):
"""
Return the bond distance between two residues using Numpy array math.
:param res1: Bio.PDB.Residue.Residue
:param res2: Bio.PDB.Residue.Residue
:rtype: float
"""
distance = atomic_distance(res1, res2, 'C', 'N')
if distance <= float(1.8):
return True
else:
return False
def is_connected_to_prev(res1, res2) -> bool:
"""
Return the bond distance between two residues using Numpy array math.
:param res1: Bio.PDB.Residue.Residue
:param res2: Bio.PDB.Residue.Residue
:rtype: float
"""
distance = atomic_distance(res1, res2, 'N', 'C')
if distance <= float(1.8):
return True
else:
return False
def atomic_distance(res1: Residue, res2: Residue, res1_atom_name: str, res2_atom_name: str) -> float:
"""
Return the atomic distance between two arbitrary Bio residues and two arbitrary atom names.
:param res1: Bio.PDB.Residue.Residue
:param res2: Bio.PDB.Residue.Residue
:param res1_atom_name: str
:param res2_atom_name: str
:rtype: float
"""
try:
return distance_numpy(res1[res1_atom_name].get_vector().get_array(), res2[res2_atom_name].get_vector().get_array())
except Exception:
logging.debug("Residue does not have the atom name or there is a problem in the vector. Returning 0")
raise IndexError
######## OLD Biopython Utility Functions replaced by BIOPose ########
def has_id(model, id) -> bool:
"""
Returns true or false if the model has the chain. Because biopython is not updating it's index that has_id is using. WTF.
"""
for i in model:
if i.id == id:
return True
return False
def get_biopython_structure(path: Union[Path, str], model_id = None) -> Structure:
structure = None
path = str(path).strip()
parser = PDBParser()
cif_parser = MMCIFParser()
extSP: List[str] = os.path.basename(path).split('.')
if not model_id:
model_id = os.path.basename(str(path))
if extSP[-1] == "pdb":
structure = parser.get_structure(model_id, path)
elif extSP[-1] == "cif":
structure = cif_parser.get_structure(model_id, path)
elif extSP[-1] == 'gz':
GZ = gzip.open(path, 'rb')
if extSP[-2] == 'pdb':
structure = parser.get_structure(model_id, GZ)
elif extSP[-2] == 'cif':
structure = cif_parser.get_structure(model_id, GZ)
else:
sys.exit("Unknown GZipped extenstion: "+path)
GZ.close()
else:
sys.exit("Unknown extension to read PDB: "+path)
return structure
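# Minimal usage sketch (the file name is a placeholder, not shipped test data):
#
#   structure = get_biopython_structure('model.pdb')
#   sequence = get_seq_from_biostructure(structure, 'A')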
def get_seq_from_biostructure(structure: Structure, chain_id) -> str:
for biochain in structure[0]:
if get_chain_length(biochain) == 0:
continue
if biochain.id == chain_id:
return get_seq_from_biochain(biochain)
print("Chain not found!")
raise LookupError
def get_seq_from_biochain(bio_chain: Chain) -> str:
if get_chain_length(bio_chain) == 0:
return ""
seq = ""
d = RestypeDefinitions()
for res in bio_chain:
if res.id[0]==' ':
aa = d.get_one_letter_from_three(res.resname)
if not aa:
logging.debug("Skipping non-canonical resname: "+res.resname)
logging.debug("This could pose a problem!")
continue
seq = seq+aa
return seq
def get_chain_length(bio_chain: Chain) -> int:
l = 0
for res in bio_chain:
if res.id[0]==' ':
l+=1
return l
def get_num_biochains(model: Model) -> int:
return len(model[0])
|
[
"jadolfbr@gmail.com"
] |
jadolfbr@gmail.com
|
d66bef1246c571ddf493cf587902f23858053b58
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03785/s746303636.py
|
aef71741efdde9c3454a67489cef28a92a40bd56
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
n, c, k = map(int, input().split())
t_l = []
for _ in range(n):
t_l.append(int(input()))
t_l.sort()
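# Greedy grouping: after sorting arrival times, open a bus at the first waiting
# passenger's time t1; a bus takes at most c passengers, all arriving no later
# than t1 + k. Count how many buses this forces.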
ans = 0
cnt = 0
for t in t_l:
if cnt == 0:
t1 = t
if t <= t1 + k:
cnt += 1
if cnt == c:
cnt = 0
ans += 1
else:
cnt = 1
ans += 1
t1 = t
if cnt != 0:
ans += 1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
242d70d5677634496d3948cbd845413b2fdd04b8
|
6ecd1efd0af4b5ec05ddc70981387e54a55c424e
|
/grapy/core/__init__.py
|
c6ea1d60632ff6676b419d92c04f6a031ccc24ec
|
[] |
no_license
|
oscar810429/grapy
|
ab8a3a2717b855c7f11d97a8c28fa3b9a0752591
|
725db528f01a50cc7f88fc3002148eb85d8da740
|
refs/heads/master
| 2021-05-03T23:15:06.933436
| 2017-12-07T09:03:31
| 2017-12-07T09:03:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
from .engine import Engine
from .base_spider import BaseSpider
from .base_sched import BaseScheduler
from .base_request import BaseRequest
from .item import Item, dump_item, load_item
__all__ = ['Engine', 'BaseSpider', 'BaseScheduler', 'BaseRequest',
'Item', 'dump_item', 'load_item']
|
[
"lmjubuntu@gmail.com"
] |
lmjubuntu@gmail.com
|
74d4e33203cc25c5e02beea9fc3531b76b8cb52e
|
a1f2df675cfc595b15f1ca9390b7517989f2d4e0
|
/testCase/contacts/testUpdateContact.py
|
3229d0b16fcaad3fb6eae0528dab1a62be7cb76c
|
[] |
no_license
|
GGGYB/crm
|
d4def2f1abc89451e1c4b11b89ef100a842ed745
|
61932466dd0ac299adc661383d506389d5f0f8e7
|
refs/heads/master
| 2022-04-15T21:29:48.539311
| 2020-04-14T10:23:41
| 2020-04-14T10:23:41
| 255,575,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,600
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'Jun'
from bs4 import BeautifulSoup
import re
from commons import common
from commons.const import const
from testCase.users import testGetUser as users
class UpdateContacts:
def __init__(self, cookie, csrf):
self.common = common.Common(cookie, csrf)
self.users = users.GetUser(cookie, csrf)
self.base_url = const.BASE_URL
self.base_url2 = const.SIGN_IN_BASE_URL
self.csrf = csrf
self.cookie = cookie
self.response = ''
self.users_id = []
self.customers_id = []
    # Batch-transfer contacts
def update_contacts_by_scope(self, scope, contact_ids):
url = self.base_url + 'contacts/massive_transfer'
body = {}
self.common.get_response_json(url, body, '打开批量转移联系人的创窗口')
        # Get the user ids
        self.users_id = self.users.getUserId()
        # Transfer the contacts
url = self.base_url + 'api/contacts/mass_transfer'
        params = {
            'authenticity_token': self.csrf,
            'user_id': self.users_id[0],
            'transfer_contracts': 'false',
            'transfer_opportunities': 'false',
            'nowin_opportunities': 'false',
            # a repeated dict key would silently keep only the last id; a list sends both
            'contact_ids[]': [contact_ids[0], contact_ids[1]]
        }
self.common.put_response_json(url, params, '批量转移联系人')
    # Batch-edit contacts
def batch_update_contacts(self, scope, contact_ids):
url = self.base_url + 'batch_edit/field_form?model=Contact'
params = {}
response = self.common.get_response_json(url, params, '打开批量编辑联系人的页面')
soup = BeautifulSoup(response.content, 'html.parser')
optional_field = soup.find(attrs={'id': 'field_choice'})
fields = re.findall(r"value=\"(.*?)\">", str(optional_field))
selected_fields = soup.findAll(attrs={'class': 'batch-edit-custom-field hidden'})
selected_field_list = []
for i in selected_fields:
selected_field = re.findall(r"<option value=\"(.*?)\">", str(i))
selected_field_list.append(selected_field)
url = self.base_url + 'api/contacts/batch_update'
        params = {
            'utf8': '✓',
            'authenticity_token': self.csrf,
            'field_choice': fields[3],
            'contact[' + fields[3] + ']': selected_field_list[2][2],
            # a repeated dict key would silently keep only the last id; a list sends both
            'ids[]': [contact_ids[0], contact_ids[1]]
        }
self.common.put_response_json(url, params, '批量编辑联系人')
    # Quick-edit a contact
def quick_edit_contacts(self, contact_id):
url = self.base_url + 'quick_edit/field_form?model=Contact&id=250705&field_name=name&page_type=index&_=1534228607097'
params = {
'model':'Contact',
'id':contact_id,
'field_name':'address.phone',
'page_type':'index',
}
        self.common.get_response_json(url, params, '快捷编辑联系人获取当前联系人的field name')
        url = self.base_url + 'api/contacts/' + str(contact_id)
        params = {
            'utf8': '✓',
            '_method': 'patch',
            'authenticity_token': self.csrf,
            'contact[id]': contact_id,
            'contact[name]': 'contact name'
        }
        # The original built these params but never sent them; submitting via POST with the
        # '_method': 'patch' override (as other requests in this class do) is assumed here.
        self.common.post_response_json(url, params, 'quick edit contact')
    # Check whether a field value already exists (duplicate check)
def check_duplicate_field(self, contact_id):
url = self.base_url + 'api/contacts/check_duplicate_field.json'
params = {
'field':'tel',
'field_value':'13512341234',
'contact_id':contact_id
}
response = self.common.post_response_json(url, params, '查询电话是否重复')
    # Write a follow-up (revisit) log
def write_revisit_log(self, scope, contact_id):
url = self.base_url + 'contacts/'+str(contact_id)+'/revisit_logs/new'
params = {}
self.common.get_response_json(url, params, '打开写跟进窗口')
url = self.base_url + 'contacts/' +str(contact_id)+'/revisit_logs?contact_id='+str(contact_id)
params = {
'utf8':'✓',
'authenticity_token':self.csrf,
'revisit_log[category]':'89825',
'revisit_log[real_revisit_at]':self.common.get_today_str_yymmddhhmm(),
'revisit_log[content]':'写跟进%s' %self.common.get_random_int(999),
'revisit_log[loggable_attributes][status]':'89822',
'revisit_log[loggable_attributes][id]':str(contact_id),
'revisit_log[remind_at]':''
}
self.common.post_response_json(url, params, '联系人列表页写跟进')
|
[
"nlnongling@163.com"
] |
nlnongling@163.com
|
ecbf157c9dc7b470bcd997729fb7a638e168fd37
|
bbbfc82402604389fb54136af421e3a41773b2e4
|
/third_party/nucleus/io/python/bed_writer_wrap_test.py
|
d0e7ffc8c620b44786cc67879a21a5892e10812e
|
[
"BSL-1.0",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
ruif2009/deepvariant
|
0a75dcc66a09c3fb8dd5b40a3bf80a5ec610d2d2
|
c7fd07016577c253f81ef253aed65c416e4c0ef7
|
refs/heads/master
| 2020-03-20T15:16:25.918599
| 2018-08-29T17:33:59
| 2018-08-29T17:33:59
| 137,508,815
| 0
| 0
|
BSD-3-Clause
| 2018-06-15T16:45:39
| 2018-06-15T16:17:25
|
Python
|
UTF-8
|
Python
| false
| false
| 4,245
|
py
|
# Copyright 2018 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for BedWriter CLIF python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow as tf
from third_party.nucleus.io.python import bed_writer
from third_party.nucleus.protos import bed_pb2
from third_party.nucleus.testing import test_utils
from third_party.nucleus.util import io_utils
_DOUBLE_CLOSE_ERROR = 'Cannot close an already closed BedWriter'
_WRITE_TO_CLOSED_ERROR = 'Cannot write to closed BED stream'
class WrapBedWriterTest(parameterized.TestCase):
def setUp(self):
out_fname = test_utils.test_tmpfile('output.bed')
self.writer = bed_writer.BedWriter.to_file(
out_fname, bed_pb2.BedHeader(num_fields=12), bed_pb2.BedWriterOptions())
self.expected_bed_content = [
'chr1\t10\t20\tfirst\t100\t+\t12\t18\t255,124,1\t3\t2,6,2\t10,12,18\n',
'chr1\t100\t200\tsecond\t250\t.\t120\t180\t252,122,12\t2\t35,40\t'
'100,160\n'
]
self.record = bed_pb2.BedRecord(
reference_name='chr1', start=20, end=30, name='r')
def test_writing_canned_records(self):
"""Tests writing all the records that are 'canned' in our tfrecord file."""
# This file is in TFRecord format.
tfrecord_file = test_utils.genomics_core_testdata(
'test_regions.bed.tfrecord')
header = bed_pb2.BedHeader(num_fields=12)
writer_options = bed_pb2.BedWriterOptions()
bed_records = list(
io_utils.read_tfrecords(tfrecord_file, proto=bed_pb2.BedRecord))
out_fname = test_utils.test_tmpfile('output.bed')
with bed_writer.BedWriter.to_file(out_fname, header,
writer_options) as writer:
for record in bed_records:
writer.write(record)
with tf.gfile.GFile(out_fname, 'r') as f:
self.assertEqual(f.readlines(), self.expected_bed_content)
def test_context_manager(self):
with self.writer:
# Writing within the context manager succeeds.
self.assertIsNone(self.writer.write(self.record))
# self.writer should be closed, so writing again will fail.
with self.assertRaisesRegexp(ValueError, _WRITE_TO_CLOSED_ERROR):
self.writer.write(self.record)
def test_double_context_manager(self):
with self.writer:
# Writing within the context manager succeeds.
self.assertIsNone(self.writer.write(self.record))
with self.assertRaisesRegexp(ValueError, _DOUBLE_CLOSE_ERROR):
# Entering the closed writer should be fine.
with self.writer:
pass # We want to raise an error on exit, so nothing to do in context.
if __name__ == '__main__':
absltest.main()
|
[
"copybara-piper@google.com"
] |
copybara-piper@google.com
|
b755c369082d791d6f7a3edce83be38b1fe2d9fc
|
467bda4ef6b73138d5a55cad0a588021b337e4cb
|
/Built-inFunctions/lambda.py
|
af3c48cc6d800caed1560034d817a4e1d34c5927
|
[] |
no_license
|
eBLDR/MasterNotes_Python
|
c118c269de6d4880158382621c7a604e701c6101
|
0f6baadd5e7398c5e74f62ca45e44aa1ed85def0
|
refs/heads/master
| 2022-11-06T11:03:33.039026
| 2022-10-24T10:42:27
| 2022-10-24T10:42:27
| 143,124,690
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,561
|
py
|
"""
map() and filter() work usually together with lambda functions.
lambda was considered was dropped from Python 2 when migrating to
Python 3, but they finally remained.
All functions created with lambda operator can also be created using the
normal way of defining a function.
"""
# lambda operator is a way to create anonymous functions
# syntax is - lambda argument_list: expression
add = lambda x, y: x + y
print(add(3, 4))
print('=' * 20)
# lambda with map()
C = [39.2, 36.5, 37.0, 38.1, 40.3] # A list with degrees Celsius
# Creating a list with degrees Fahrenheit
F = list(map(lambda x: x * 9 / 5 + 32, C))
print(F)
# Equivalence creating a function and using list comprehension
def converter(n):
return n * 9 / 5 + 32
F_comp = [converter(x) for x in C]
print(F_comp)
print('=' * 20)
# lambda with filter()
fibonacci = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
odd_fib = list(filter(lambda x: x % 2, fibonacci))
# Remember that bool(int!=0) is True
print('bool(2) is {}'.format(bool(2))) # only bool(0) is False
print(odd_fib)
# equivalence using list comprehension
odd_fib_comp = [x for x in fibonacci if x % 2 != 0]
print(odd_fib_comp)
print('=' * 20)
# lambda with reduce()
from functools import reduce
# Returns the largest value
f = lambda a, b: a if (a > b) else b
print(reduce(f, [47, 11, 42, 102, 13]))
# Returns the sum of all numbers
print(reduce(lambda x, y: x + y, range(1, 101)))
print('=' * 20)
class StringConversionMethod:
"""Class for defining string conversion methods."""
def __init__(self, name, execute_func):
self.name = name
self.execute_func = execute_func
def __str__(self):
return self.name
def apply(self, string_to_convert):
converted_string = ''
try:
converted_string = self.execute_func(string_to_convert)
except Exception as exc:
print('Failed to apply conversion: ' + self.name)
print(exc)
return converted_string
my_rules = [
StringConversionMethod(
'Strip everything after 30 characters',
(lambda x: x[:30])
),
StringConversionMethod(
'Add 10 whitespaces before last character',
(lambda x: x[:-1] + ' ' * 10 + x[-1])
)
]
my_string = 'This is a test string for conversion purposes. Feel free to ' \
'update me if you wish to.'
print('Before conversion: ' + my_string)
for rule in my_rules:
print('Applying rule: ' + rule.name + ' ...')
my_string = rule.apply(my_string)
print('After conversion: ' + my_string)
|
[
"ed.bldr@gmail.com"
] |
ed.bldr@gmail.com
|
74a70d271549abfa376c0130e321e0d731200223
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02859/s935054625.py
|
63bcf2523f8e10da7b99ce4225db4db57c27ddd3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
#from statistics import median
#import collections
#aa = collections.Counter(a) # list to list || .most_common(2) returns the 2 most frequent items, accessed like a[0][0]
from fractions import gcd
from itertools import combinations,permutations,accumulate # (string,3) means 3 at a time
#from collections import deque
from collections import deque,defaultdict,Counter
import decimal
import re
#import bisect
#
# d = m - k[i] - k[j]
# if kk[bisect.bisect_right(kk,d) - 1] == d:
#
#
#
# when plain Python is too slow, submitting with PyPy may pass!!
#
#
# my_round_int = lambda x:np.round((x*2 + 1)//2)
# round half up
import sys
sys.setrecursionlimit(10000000)
mod = 10**9 + 7
#mod = 9982443453
def readInts():
return list(map(int,input().split()))
def I():
return int(input())
print(I()**2)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
346dab3b4f521560c406a72391886bc26d742ca6
|
febb7a4b889c2f40637e2b688eb770cf0809226f
|
/fython/test/importpec_fort/fort_slice_import_test.py
|
f267833fb682e1cdab2266b4ad94ed42a1be4a25
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
nicolasessisbreton/fython
|
68253552c626640b5efc2a7cea9384c8e0425c08
|
988f5a94cee8b16b0000501a22239195c73424a1
|
refs/heads/master
| 2021-01-10T07:10:06.793158
| 2017-08-25T17:27:05
| 2017-08-25T17:27:05
| 50,076,320
| 48
| 3
| null | 2016-08-21T17:16:12
| 2016-01-21T02:30:31
|
Python
|
UTF-8
|
Python
| false
| false
| 351
|
py
|
s="""
.a.f90
module a
integer :: x_a = 10
integer :: y_a = 20
end module
.b.fy
import .a(
x_a,
y_a = y,
)
int x = 1
print 'in b {:x} {:x_a} {:y}'
"""
from fython.test import *
shell('rm -rf a/ a.* b.* c.*')
writer(s)
w = load('.b', force=1, release=1, verbose=0, run_main=0)
# print(open(w.module.url.fortran_path, 'r').read())
|
[
"contact@nicolasessisbreton.com"
] |
contact@nicolasessisbreton.com
|
f40d8834fbc04247849b75f9d35c610536b825fc
|
af7466d6abfcce9e02efe91abe1875fbcf8d04aa
|
/tests/test_handlers.py
|
08958e97ccff36b29be6c13a70d7d8ae978fcf34
|
[] |
no_license
|
NYPL/sfr-oclc-catalog-lookup
|
eb1472d1a6cab85734b4c0ac6648de846e5b00fb
|
4bf3bde518211870d6c20cde840c57bd83c1816c
|
refs/heads/development
| 2020-04-15T07:45:59.184860
| 2020-02-05T21:22:32
| 2020-02-05T21:22:32
| 164,501,003
| 1
| 1
| null | 2020-02-05T21:22:34
| 2019-01-07T21:43:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,023
|
py
|
import json
import unittest
from unittest.mock import patch, mock_open, call
# Set this variable here as it gets checked at loadtime
import os
os.environ['OUTPUT_REGION'] = 'us-test-1'
from service import handler
from helpers.errorHelpers import NoRecordsReceived, OCLCError, DataError
from lib.outPutManager import OutputManager
class TestHandler(unittest.TestCase):
@patch('service.fetchData', return_value='oclcResponse')
@patch.object(OutputManager, 'formatResponse', return_value='outObject')
def test_handler_clean(self, mockResponse, mockFetch):
testRec = {
'queryStringParameters': {
'identifier': '000000000',
'type': 'oclc'
}
}
resp = handler(testRec, None)
self.assertEqual(resp, 'outObject')
mockFetch.assert_called_once_with('000000000', 'oclc')
mockResponse.assert_called_once_with(200, 'oclcResponse')
@patch.object(OutputManager, 'formatResponse', return_value='outObject')
def test_handler_error_bad_parameters(self, mockResponse):
testRec = {
'queryStringParameters': {
'identifier': '000000000',
}
}
resp = handler(testRec, None)
self.assertEqual(resp, 'outObject')
mockResponse.assert_called_once_with(
400,
{'message': 'GET query must include identifier and type'}
)
@patch('service.fetchData', side_effect=OCLCError('Test Error'))
@patch.object(OutputManager, 'formatResponse', return_value='outObject')
def test_handler_internal_error(self, mockResponse, mockFetch):
testRec = {
'queryStringParameters': {
'identifier': '000000000',
'type': 'oclc'
}
}
resp = handler(testRec, None)
self.assertEqual(resp, 'outObject')
mockFetch.assert_called_once_with('000000000', 'oclc')
mockResponse.assert_called_once_with(500, {'message': 'Test Error'})
|
[
"mwbenowitz@gmail.com"
] |
mwbenowitz@gmail.com
|
9b2c0ade2bac3fb04dd982ba529d79dd7992559d
|
5db0fab37c2b8a618d85d3b60fab9f806c416474
|
/src/python/pants/backend/codegen/export_codegen_goal_test.py
|
98f758e8e1525846c7e650945edeebd056882484
|
[
"Apache-2.0"
] |
permissive
|
pantsbuild/pants
|
4988d1ac5474ec95f94ce2218aeb759401e4b011
|
98cbda8545f0d58c586ed2daa76fefd729d5e0d5
|
refs/heads/main
| 2023-09-05T03:44:17.646899
| 2023-09-01T19:52:09
| 2023-09-01T19:52:09
| 7,209,075
| 2,708
| 593
|
Apache-2.0
| 2023-09-14T19:33:33
| 2012-12-17T17:39:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,804
|
py
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pathlib import Path
import pytest
from pants.backend.codegen.export_codegen_goal import ExportCodegen
from pants.backend.codegen.export_codegen_goal import rules as write_codegen_rules
from pants.core.target_types import FileSourceField, ResourceSourceField
from pants.core.util_rules import distdir
from pants.engine.fs import CreateDigest, FileContent, Snapshot
from pants.engine.rules import Get, rule
from pants.engine.target import (
GeneratedSources,
GenerateSourcesRequest,
MultipleSourcesField,
SingleSourceField,
Target,
)
from pants.engine.unions import UnionRule
from pants.testutil.rule_runner import RuleRunner
class Gen1Sources(MultipleSourcesField):
pass
class Gen2Sources(SingleSourceField):
pass
class Gen1Target(Target):
alias = "gen1"
core_fields = (Gen1Sources,)
class Gen2Target(Target):
alias = "gen2"
core_fields = (Gen2Sources,)
class Gen1Request(GenerateSourcesRequest):
input = Gen1Sources
output = FileSourceField
class Gen2Request(GenerateSourcesRequest):
input = Gen2Sources
output = ResourceSourceField
class GenNoExportRequest(GenerateSourcesRequest):
"""The presence of this generator is simply to verify that is not used when running the export-
codegen goal."""
input = Gen1Sources
output = Gen2Sources
exportable = False
class Gen1DuplicatedRequest(GenerateSourcesRequest):
input = Gen1Sources
output = ResourceSourceField
@rule
async def gen1(_: Gen1Request) -> GeneratedSources:
result = await Get(Snapshot, CreateDigest([FileContent("assets/README.md", b"Hello!")]))
return GeneratedSources(result)
@rule
async def gen2(_: Gen2Request) -> GeneratedSources:
result = await Get(Snapshot, CreateDigest([FileContent("src/haskell/app.hs", b"10 * 4")]))
return GeneratedSources(result)
@rule
async def gen_no_export(_: GenNoExportRequest) -> GeneratedSources:
assert False, "Should not ever get here as `GenNoExportRequest.exportable==False`"
@rule
async def gen1_duplicated(_: Gen1DuplicatedRequest) -> GeneratedSources:
result = await Get(Snapshot, CreateDigest([FileContent("assets/DUPLICATED.md", b"Hello!")]))
return GeneratedSources(result)
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*write_codegen_rules(),
gen1,
gen2,
gen_no_export,
gen1_duplicated,
UnionRule(GenerateSourcesRequest, Gen1Request),
UnionRule(GenerateSourcesRequest, Gen2Request),
UnionRule(GenerateSourcesRequest, GenNoExportRequest),
UnionRule(GenerateSourcesRequest, Gen1DuplicatedRequest),
*distdir.rules(),
],
target_types=[Gen1Target, Gen2Target],
)
def test_no_codegen_targets(rule_runner: RuleRunner, caplog) -> None:
result = rule_runner.run_goal_rule(ExportCodegen)
assert result.exit_code == 0
assert len(caplog.records) == 1
assert "No codegen files/targets matched. All codegen target types: gen1, gen2" in caplog.text
def test_export_codegen(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{"BUILD": "gen1(name='gen1')\ngen2(name='gen2', source='foo.ext')\n", "foo.ext": ""}
)
result = rule_runner.run_goal_rule(ExportCodegen, args=["::"])
assert result.exit_code == 0
parent_dir = Path(rule_runner.build_root, "dist", "codegen")
assert (parent_dir / "assets" / "README.md").read_text() == "Hello!"
assert (parent_dir / "assets" / "DUPLICATED.md").read_text() == "Hello!"
assert (parent_dir / "src" / "haskell" / "app.hs").read_text() == "10 * 4"
|
[
"noreply@github.com"
] |
pantsbuild.noreply@github.com
|
133365e71eb1d8396eba4ae1e26a95f054b6856d
|
bd5e611a2d177b3f3ca58965b53e8ce0e6d2000b
|
/assignments/A5/A5Part3.py
|
1ae4028afbeec415b763cf726b263acd06b4652a
|
[] |
no_license
|
jiemojiemo/audio_signal_processing_assignments
|
680f921ad8984d1869f10fab0eae183ef19cb808
|
0d26ada375c3a54fe9dda3e490880168a17769a2
|
refs/heads/master
| 2020-04-04T14:07:36.649933
| 2018-11-09T13:50:26
| 2018-11-09T13:50:26
| 155,987,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,403
|
py
|
import numpy as np
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../software/models/'))
import stft
import utilFunctions as UF
import sineModel as SM
import matplotlib.pyplot as plt
"""
A5-Part-3: Tracking sinusoids of different amplitudes
Perform a "good" sinusoidal analysis of a signal with two sinusoidal components of different amplitudes
by specifying the parameters 'window type' and 'peak picking threshold' in the function mainlobeTracker()
below. The function should return the parameters used, true and estimated tracks of frequency components,
and their associated time stamps.
We will consider a signal that has two sinusoidal components with a very large difference in their
amplitude. We will use a synthetically generated signal with frequency components 440 Hz and 602 Hz,
s = sin(2*pi*440*t) + 2e-3*sin(2*pi*602*t). As you see the amplitude difference is large. You will
use the sound sines-440-602-hRange.wav. Listen to the sound and use sms-tools GUI or sonic visualizer
to see its spectrogram. Notice the difference in the amplitudes of its components.
You will not write any additional code in this question, but modify the parameters of the code to obtain
the best possible results. There are three functions we have written for you. Please go through each
function and understand it, but do not modify any of it.
1. mainlobeTracker(): This is the main function. Uses sineModel.py for sinusoidal analysis of the input
sound. It takes an input audio file and uses the function sineModel.sineModelAnal(), tracks the mainlobes
of the two sinusoids to obtain the two frequency tracks (fTrackEst) in the signal. It also computes the
estimation error (meanErr) in frequency using the true frequency tracks obtained using genTrueFreqTracks().
mainlobeTracker() calls the following two functions:
2. genTimeStamps(): Generates the time stamps at which the sinusoid frequencies are estimated (one
value per audio frame)
3. genTrueFreqTracks(): For the input sound sines-440-602-hRange.wav, the function generates the true
frequency values, so that we can compare the true and the estimated frequency values.
We will use sinusoidal Model to analyse this sound and extract the two components. We will use the
sineModel.sineModelAnal() function for analysis. The code for analysis is already provided below with
some parameters we have fixed. For analysis, we will use a window length (M) of 2047 samples, an FFT
size (N) of 4096 samples and hop size (H) of 128 samples. For sine tracking, we set the minimum sine
duration (minSineDur) to 0.02 seconds, freqDevOffset to 10 Hz and freqDevSlope to its default value of
0.001. Since we need only two frequency component estimates at every frame, we set maxnSines = 2.
Choose the parameters window and the peak picking threshold (t) such that the mean estimation error of
each frequency components is less than 2 Hz. There is a range of values of M and t for which this is
true and all of those values will be considered correct answers. You can plot the estimated and true
frequency tracks to visualize the accuracy of estimation. The output is the set of parameters you used:
window, t, the time stamps, estimated and the true frequency tracks. Note that choosing the wrong window
might lead to tracking of one of the sidelobes of the high amplitude sinusoid instead of the mainlobe of
the low amplitude sinusoid.
We have written the function mainlobeTracker() and you have to edit the window and t values. For the window, choose
one of 'boxcar', 'hanning', 'hamming', 'blackman', or 'blackmanharris'. t is specified in negative dB. These two
parameters are marked as XX and you can edit the values as needed.
As an example, choosing window = 'boxcar', t = -80.0, the mean estimation error is [0.142, 129.462] Hz.
"""
def mainlobeTracker(inputFile = '../../sounds/sines-440-602-hRange.wav'):
"""
Input:
inputFile (string): wav file including the path
Output:
window (string): The window type used for analysis
t (float) = peak picking threshold (negative dB)
tStamps (numpy array) = A Kx1 numpy array of time stamps at which the frequency components were estimated
fTrackEst = A Kx2 numpy array of estimated frequency values, one row per time frame, one column per component
fTrackTrue = A Kx2 numpy array of true frequency values, one row per time frame, one column per component
"""
# Analysis parameters: Modify values of the parameters marked XX
window = 'blackmanharris' # Window type
t = -80 # threshold (negative dB)
### Go through the code below and understand it, do not modify anything ###
M = 2047 # Window size
N = 4096 # FFT Size
H = 128 # Hop size in samples
maxnSines = 2
minSineDur = 0.02
freqDevOffset = 10
freqDevSlope = 0.001
# read input sound
fs, x = UF.wavread(inputFile)
w = get_window(window, M) # Compute analysis window
tStamps = genTimeStamps(x.size, M, fs, H) # Generate the tStamps to return
# analyze the sound with the sinusoidal model
fTrackEst, mTrackEst, pTrackEst = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
fTrackTrue = genTrueFreqTracks(tStamps) # Generate the true frequency tracks
tailF = 20
# Compute mean estimation error. 20 frames at the beginning and end not used to compute error
meanErr = np.mean(np.abs(fTrackTrue[tailF:-tailF,:] - fTrackEst[tailF:-tailF,:]),axis=0)
print ("Mean estimation error = " + str(meanErr) + ' Hz') # Print the error to terminal
# Plot the estimated and true frequency tracks
mX, pX = stft.stftAnal(x, w, N, H)
maxplotfreq = 900.0
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(tStamps, binFreq, np.transpose(mX[:,:int(N*maxplotfreq/fs+1)]), cmap='hot_r')
plt.plot(tStamps,fTrackTrue, 'o-', color = 'c', linewidth=3.0)
plt.plot(tStamps,fTrackEst, color = 'y', linewidth=2.0)
plt.legend(('True f1', 'True f2', 'Estimated f1', 'Estimated f2'))
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.autoscale(tight=True)
return window, float(t), tStamps, fTrackEst, fTrackTrue # Output returned
### Do not modify this function
def genTimeStamps(xlen, M, fs, H):
# Generates the timeStamps as needed for output
hM1 = int(np.floor((M+1)/2))
hM2 = int(np.floor(M/2))
xlen = xlen + 2*hM2
pin = hM1
pend = xlen - hM1
tStamps = np.arange(pin,pend,H)/float(fs)
return tStamps
### Do not modify this function
def genTrueFreqTracks(tStamps):
# Generates the true frequency values to compute estimation error
# Specifically to sines-440-602-hRange.wav
fTrack = np.zeros((len(tStamps),2))
fTrack[:,0] = np.transpose(440*np.ones((len(tStamps),1)))
fTrack[:,1] = np.transpose(602*np.ones((len(tStamps),1)))
return fTrack
|
[
"460706836@qq.com"
] |
460706836@qq.com
|
60cc252d5efc1565cc179eb8a6d0416f4faf6e6c
|
422e3fb2aeaa9853ba6ac809bc3d0f1ee507ae27
|
/funcionario/urls.py
|
20fd28ee4e17eb598a1defa4a92c7f3292785a4c
|
[] |
no_license
|
redcliver/oldRodao
|
e279343de24952b7b86df50ff07f34be83881db6
|
0acc4440f9b7f92599fe4ef7a1d1af5bed52a81b
|
refs/heads/master
| 2020-03-07T19:12:14.066837
| 2018-07-25T20:15:50
| 2018-07-25T20:15:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.func),
url(r'^busca', views.busca),
url(r'^editar', views.editar),
]
|
[
"igor-peres@hotmail.com"
] |
igor-peres@hotmail.com
|
76f610cdb180210a08cc572c6227d53a33bab208
|
fcde32709c62b8ee86da459bb7c8eee52c848118
|
/爬虫1905/day07/京东.py
|
564987878e9830e8db1ae8ffd2b55bd530e805a6
|
[] |
no_license
|
klaus2015/py_base
|
6b92d362c3d7dc0e09205a037f4d580381dac94d
|
ec32c731c1c2f6a0dab87f1d167397e4fa86b8de
|
refs/heads/master
| 2022-07-28T15:49:30.383648
| 2020-05-11T15:31:43
| 2020-05-11T15:31:43
| 261,777,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,473
|
py
|
from selenium import webdriver
import time
class JDSpider:
def __init__(self):
self.url = 'https://www.jd.com/'
self.browser = webdriver.Chrome()
def get_html(self):
so = '//*[@id="key"]'
button = '//*[@id="search"]/div/div[2]/button'
self.browser.get(self.url)
        self.browser.find_element_by_xpath(so).send_keys('爬虫书')  # search keyword: "web crawler books"
self.browser.find_element_by_xpath(button).click()
time.sleep(4)
def parse_html(self):
li_list = self.browser.find_elements_by_xpath('//*[@id="J_goodsList"]/ul/li')
for li in li_list:
item = {}
item['price'] = li.find_element_by_xpath('.//div[@class="p-price"]').text.strip()
item['name'] = li.find_element_by_xpath('.//div[@class="p-name"]/a/em').text.strip()
item['comment'] = li.find_element_by_xpath('.//div[@class="p-commit"]/strong').text.strip()
item['market'] = li.find_element_by_xpath('.//div[@class="p-shopnum"]').text.strip()
print(item)
def main(self):
self.get_html()
while True:
self.parse_html()
if self.browser.page_source.find('pn-next disabled') == -1:
                # Not the last page: locate and click the next-page button
self.browser.find_element_by_class_name('pn-next').click()
time.sleep(3)
else:
break
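# --- Editorial aside: a hedged alternative to the fixed time.sleep() waits.
# Selenium's explicit waits poll until a condition holds instead of sleeping a
# fixed interval; the element id 'J_goodsList' is the same assumption the
# XPath in parse_html() already makes.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def _editor_wait_for_results(browser, timeout=10):
    # Blocks until the result list is in the DOM, else raises TimeoutException.
    return WebDriverWait(browser, timeout).until(
        EC.presence_of_element_located((By.ID, 'J_goodsList')))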
if __name__ == '__main__':
j = JDSpider()
j.main()
|
[
"598467866@qq.com"
] |
598467866@qq.com
|
e145f42d9f464a5eefd91e30bdb2bcfe77fba0d7
|
caaf1b0754db1e676c37a6f1e58f19183754e654
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2023_01_01/operations/_private_link_resources_operations.py
|
ee2978f9ad1fbe77a9f34143f1f9ed6b203ac70f
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
rdomenzain/azure-sdk-for-python
|
45dfb39121a0abda048c22e7309733a56259f525
|
58984255aeb904346b6958c5ba742749a2cc7d1b
|
refs/heads/master
| 2023-07-07T06:53:12.967120
| 2023-07-04T16:27:37
| 2023-07-04T16:27:37
| 258,050,134
| 0
| 0
|
MIT
| 2020-04-23T00:12:14
| 2020-04-23T00:12:13
| null |
UTF-8
|
Python
| false
| false
| 6,733
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str, resource_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"resourceName": _SERIALIZER.url(
"resource_name",
resource_name,
"str",
max_length=63,
min_length=1,
pattern=r"^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$",
),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class PrivateLinkResourcesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2023_01_01.ContainerServiceClient`'s
:attr:`private_link_resources` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> _models.PrivateLinkResourcesListResult:
"""Gets a list of private link resources in the specified managed cluster.
To learn more about private clusters, see:
https://docs.microsoft.com/azure/aks/private-clusters.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResourcesListResult or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2023_01_01.models.PrivateLinkResourcesListResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-01"))
cls: ClsType[_models.PrivateLinkResourcesListResult] = kwargs.pop("cls", None)
request = build_list_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateLinkResourcesListResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateLinkResources"
}
|
[
"noreply@github.com"
] |
rdomenzain.noreply@github.com
|
836f58f4da328567c3d68ec5c6b81c8bb68ba7b9
|
7393ac624a6d6fda7d427ed58d6100a3e0fc8f53
|
/workflows/launcher/launch_gruneisen_lammps_gan.py
|
b74df62c40fe577e7a1c3398e66569e85c688bb2
|
[
"MIT"
] |
permissive
|
abelcarreras/aiida_extensions
|
9f460efe81bbf1817ccbd630ba253fbb2b074f3f
|
cce3fe537d041fdae87c5a60ce433e6de3fc30cf
|
refs/heads/master
| 2020-12-30T14:20:16.919507
| 2017-10-12T10:19:29
| 2017-10-12T10:19:29
| 91,312,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,873
|
py
|
from aiida import load_dbenv
load_dbenv()
from aiida.orm import Code, DataFactory, load_node, WorkflowFactory
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
import numpy as np
# GaN [-37000 bar <-> 23000 bar]
cell = [[ 3.1900000572, 0, 0],
[-1.5950000286, 2.762621076, 0],
[ 0.0, 0, 5.1890001297]]
scaled_positions=[(0.6666669, 0.3333334, 0.0000000),
(0.3333331, 0.6666663, 0.5000000),
(0.6666669, 0.3333334, 0.3750000),
(0.3333331, 0.6666663, 0.8750000)]
symbols=['Ga', 'Ga', 'N', 'N']
structure = StructureData(cell=cell)
positions = np.dot(scaled_positions, cell)
for i, scaled_position in enumerate(scaled_positions):
structure.append_atom(position=np.dot(scaled_position, cell).tolist(),
symbols=symbols[i])
structure.store()
lammps_machine = {
'num_machines': 1,
'parallel_env': 'mpi*',
'tot_num_mpiprocs': 16}
parameters_opt = {'relaxation': 'tri', # iso/aniso/tri
# 'pressure': 0.0, # In Gruneisen workflow this is ignored. Pressure is set in workflow arguments
'vmax': 0.000001, # Angstrom^3
'energy_tolerance': 1.0e-25, # eV
'force_tolerance': 1.0e-25, # eV angstrom
'max_evaluations': 1000000,
'max_iterations': 500000}
# Cluster information
machine_dict = {
'num_machines': 1,
'parallel_env':'mpi*',
'tot_num_mpiprocs' : 16}
# Phonopy input parameters
phonopy_parameters = {'supercell': [[3, 0, 0],
[0, 3, 0],
[0, 0, 3]],
'primitive': [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]],
'distance': 0.01,
'mesh': [40, 40, 40],
'symmetry_precision': 1e-5}
# GaN Tersoff
tersoff_gan = {'Ga Ga Ga': '1.0 0.007874 1.846 1.918000 0.75000 -0.301300 1.0 1.0 1.44970 410.132 2.87 0.15 1.60916 535.199',
'N N N' : '1.0 0.766120 0.000 0.178493 0.20172 -0.045238 1.0 1.0 2.38426 423.769 2.20 0.20 3.55779 1044.77',
'Ga Ga N' : '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 0.0 0.00000 0.00000 2.90 0.20 0.00000 0.00000',
'Ga N N' : '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 1.0 2.63906 3864.27 2.90 0.20 2.93516 6136.44',
'N Ga Ga': '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 1.0 2.63906 3864.27 2.90 0.20 2.93516 6136.44',
'N Ga N ': '1.0 0.766120 0.000 0.178493 0.20172 -0.045238 1.0 0.0 0.00000 0.00000 2.20 0.20 0.00000 0.00000',
'N N Ga': '1.0 0.001632 0.000 65.20700 2.82100 -0.518000 1.0 0.0 0.00000 0.00000 2.90 0.20 0.00000 0.00000',
'Ga N Ga': '1.0 0.007874 1.846 1.918000 0.75000 -0.301300 1.0 0.0 0.00000 0.00000 2.87 0.15 0.00000 0.00000'}
potential ={'pair_style': 'tersoff',
'data': tersoff_gan}
# Collect workflow input data
wf_parameters = {
'structure': structure,
'phonopy_input': {'parameters': phonopy_parameters},
'input_force': {'code': 'lammps_force@boston',
'potential': potential,
'resources': lammps_machine},
'input_optimize': {'code': 'lammps_optimize@boston',
'potential': potential,
'parameters': parameters_opt,
'resources': lammps_machine},
}
#Submit workflow
WorkflowGruneisen = WorkflowFactory('wf_gruneisen_pressure')
wf = WorkflowGruneisen(params=wf_parameters, pre_optimize=False) # pressure in kb
wf.label = 'Gruneisen GaN'
wf.start()
print ('pk: {}'.format(wf.pk))
|
[
"abelcarreras83@gmail.com"
] |
abelcarreras83@gmail.com
|
5642ee169b46ea3d6ea0401b70298635c483407b
|
304033f60097c489cbc60aab639be45ccdbef1a5
|
/algorithms/boj/backtracking/15654.py
|
8c2f51d67360895663b0b2f353ca4a026687c41b
|
[] |
no_license
|
pgw928/TIL
|
3d0c47c07bd1f5c73826daf8579a2b0e3f93cb95
|
765906f1e6eecad4ad8ec9bf704041433d7eb304
|
refs/heads/master
| 2023-06-29T05:46:30.039815
| 2021-08-10T17:38:11
| 2021-08-10T17:38:11
| 288,923,095
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
import sys
n, m = map(int, sys.stdin.readline().split())
nums = sorted(map(int, sys.stdin.readline().split()))
check = { i:None for i in range(1, m+1)}
def sol(count):
for j in nums:
if is_promising(check, j):
if count == m:
print(' '.join(tuple(map(str, check.values()))))
return
check[count+1] = j
sol(count+1)
check[count+1] = None
def is_promising(check, j):
i = 1
while check[i]!=None and i<m:
if check[i]==j:
return False
i += 1
return True
sol(0)
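# --- Editorial aside (not invoked): an equivalent via the standard library.
# Because `nums` is sorted and its values are distinct, itertools.permutations
# yields the length-m tuples in exactly the lexicographic order BOJ 15654
# asks sol(0) to print above.
from itertools import permutations
def _editor_sol_itertools():
    for p in permutations(nums, m):
        print(' '.join(map(str, p)))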
|
[
"pku928@naver.com"
] |
pku928@naver.com
|
42e0ee2ee493ebde7c93afaf2deefaac986dbdec
|
ac5d55e43eb2f1fb8c47d5d2a68336eda181d222
|
/DynamicProgramming/97. Interleaving String.py
|
0e401e739d87b2005763a77deaff14da8c93d611
|
[] |
no_license
|
tinkle1129/Leetcode_Solution
|
7a68b86faa37a3a8019626e947d86582549374b3
|
1520e1e9bb0c428797a3e5234e5b328110472c20
|
refs/heads/master
| 2021-01-11T22:06:45.260616
| 2018-05-28T03:10:50
| 2018-05-28T03:10:50
| 78,925,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
# -*- coding: utf-8 -*-
###########################################
# Author: Tinkle
# E-mail: shutingnjupt@gmail.com
# Name: Interleaving String.py
# Creation Time: 2017/7/29
###########################################
'''
Given s1, s2, s3, find whether s3 is formed by the interleaving of s1 and s2.
For example,
Given:
s1 = "aabcc",
s2 = "dbbca",
When s3 = "aadbbcbcac", return true.
When s3 = "aadbbbaccc", return false.
'''
class Solution(object):
def isInterleave(self, s1, s2, s3):
"""
:type s1: str
:type s2: str
:type s3: str
:rtype: bool
"""
if len(s1)+len(s2)!=len(s3): return False
dp=[[False for i in range(len(s2)+1)] for j in range(len(s1)+1)]
dp[0][0]=True
for i in range(1,len(s1)+1):
dp[i][0] = dp[i-1][0] and s3[i-1]==s1[i-1]
for i in range(1,len(s2)+1):
dp[0][i] = dp[0][i-1] and s3[i-1]==s2[i-1]
for i in range(1,len(s1)+1):
for j in range(1,len(s2)+1):
dp[i][j] = (dp[i-1][j] and s1[i-1]==s3[i+j-1]) or (dp[i][j-1] and s2[j-1]==s3[i+j-1])
return dp[len(s1)][len(s2)]
s1 = 'aabcc'
s2 = 'dbbca'
s3 = 'aadbbcbcac'
S = Solution()
print S.isInterleave(s1,s2,s3)
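# --- Editorial aside: a hedged space-optimized variant (editor's sketch).
# Only the previous row of dp is ever read, so one 1-D row suffices, cutting
# memory from O(len(s1)*len(s2)) to O(len(s2)) while keeping the recurrence.
def is_interleave_1d(s1, s2, s3):
    if len(s1) + len(s2) != len(s3):
        return False
    dp = [False] * (len(s2) + 1)
    dp[0] = True
    for j in range(1, len(s2) + 1):
        dp[j] = dp[j - 1] and s2[j - 1] == s3[j - 1]
    for i in range(1, len(s1) + 1):
        dp[0] = dp[0] and s1[i - 1] == s3[i - 1]
        for j in range(1, len(s2) + 1):
            dp[j] = (dp[j] and s1[i - 1] == s3[i + j - 1]) or \
                    (dp[j - 1] and s2[j - 1] == s3[i + j - 1])
    return dp[len(s2)]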
|
[
"496047829@qq.com"
] |
496047829@qq.com
|
1befb1e6f1472bf9f42e6013cb12d0bdc26e42e5
|
c92f43835821d8df2b93dfd781f890e56891f849
|
/Python3/8. String to Integer (atoi).py
|
1cee13b16a12801e2e6ea04e8dba1f0ee1846ad4
|
[] |
no_license
|
iamdoublewei/Leetcode
|
f4ae87ed8c31537098790842a72cafa5747d8588
|
e36f343aab109b051a9c3a96956c50b5580c7c15
|
refs/heads/master
| 2022-11-06T01:31:56.181800
| 2022-11-04T20:07:35
| 2022-11-04T20:07:35
| 71,944,123
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,819
|
py
|
'''
Implement atoi which converts a string to an integer.
The function first discards as many whitespace characters as necessary until the first non-whitespace character is found. Then, starting from this character, takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.
The string can contain additional characters after those that form the integral number, which are ignored and have no effect on the behavior of this function.
If the first sequence of non-whitespace characters in str is not a valid integral number, or if no such sequence exists because either str is empty or it contains only whitespace characters, no conversion is performed.
If no valid conversion could be performed, a zero value is returned.
Note:
Only the space character ' ' is considered as whitespace character.
Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−2^31, 2^31 − 1]. If the numerical value is out of the range of representable values, INT_MAX (2^31 − 1) or INT_MIN (−2^31) is returned.
Example 1:
Input: "42"
Output: 42
Example 2:
Input: " -42"
Output: -42
Explanation: The first non-whitespace character is '-', which is the minus sign.
Then take as many numerical digits as possible, which gets 42.
Example 3:
Input: "4193 with words"
Output: 4193
Explanation: Conversion stops at digit '3' as the next character is not a numerical digit.
Example 4:
Input: "words and 987"
Output: 0
Explanation: The first non-whitespace character is 'w', which is not a numerical
digit or a +/- sign. Therefore no valid conversion could be performed.
Example 5:
Input: "-91283472332"
Output: -2147483648
Explanation: The number "-91283472332" is out of the range of a 32-bit signed integer.
Therefore INT_MIN (−2^31) is returned.
'''
class Solution:
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
str = str.strip()
if len(str) == 0: return 0
res = 0
sign = 0
for i, v in enumerate(str):
if not sign:
if v == '+':
sign = 1
elif v == '-':
sign = -1
elif v.isdigit():
sign = 1
res = res * 10 + int(v)
else:
return 0
continue
if sign:
if v.isdigit():
res = res * 10 + int(v)
else:
break
if sign * res >= 2147483647:
return 2147483647
elif sign * res <= -2147483648:
return -2147483648
else:
return sign * res
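# --- Editorial aside: a hedged regex-based equivalent (editor's sketch).
# One pattern grabs an optional sign plus leading digits, then the result is
# clamped to the 32-bit range. Note \s* is slightly looser than the stated
# "only ' ' counts as whitespace" rule (it also skips tabs and newlines).
import re
def my_atoi_regex(s):
    m = re.match(r'\s*([+-]?\d+)', s)
    if not m:
        return 0
    return max(-2**31, min(2**31 - 1, int(m.group(1))))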
|
[
"iamdoublewei@gmail.com"
] |
iamdoublewei@gmail.com
|
58bb679db3a7eb38d6e8e0ecd200d684801d97e7
|
81d635211686b1bc87af5892bd9e0fb95cc2ddb8
|
/adwords api/googleads-python-lib-master/examples/dfp/v201511/user_team_association_service/create_user_team_associations.py
|
e98d6a7766208d4ef56d9b401e19d87e3a2da24d
|
[
"Apache-2.0"
] |
permissive
|
analyticsbot/Python-Code---Part-2
|
de2f0581258b6c8b8808b4ef2884fe7e323876f0
|
12bdcfdef4472bcedc77ae61707c25a4a09cba8a
|
refs/heads/master
| 2021-06-04T05:10:33.185766
| 2016-08-31T13:45:45
| 2016-08-31T13:45:45
| 66,679,512
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,304
|
py
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a user to a team by creating an association between them.
To determine which teams exist, run get_all_teams.py. To determine which
users exist, run get_all_users.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
TEAM_ID = 'INSERT_TEAM_ID_HERE'
USER_IDS = ['INSERT_USER_IDS_TO_ASSOCIATE_TO_TEAM_HERE']
def main(client, team_id, user_ids):
# Initialize appropriate service.
user_team_association_service = client.GetService(
'UserTeamAssociationService', version='v201511')
user_team_associations = []
for user_id in user_ids:
user_team_associations.append(
{
'teamId': team_id,
'userId': user_id
})
# Create the user team association on the server.
user_team_associations = (
user_team_association_service.createUserTeamAssociations(
user_team_associations))
# Display results.
if user_team_associations:
for user_team_association in user_team_associations:
print ('A user team association between user with ID \'%s\' and team with'
             ' ID \'%s\' was created.' % (user_team_association['userId'],
user_team_association['teamId']))
else:
print 'No user team associations created.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, TEAM_ID, USER_IDS)
|
[
"ravi.shankar1788@gmail.com"
] |
ravi.shankar1788@gmail.com
|
d9ac40a91577793d0c1810e9bab0ba59b898beac
|
b6e5d86b1212103c41fed600c937afab6b19e438
|
/setup.py
|
2ff8a57dbf9db9e47d19b46a2cb9a77a8fe039aa
|
[
"MIT"
] |
permissive
|
4064w007/gamma_index
|
42e59554f84c8af389679705cd94b7017f10e141
|
887d3d83ab8779fc9f4ec73090ad032edec7ea73
|
refs/heads/master
| 2020-04-09T16:59:02.745028
| 2017-07-22T06:34:18
| 2017-07-22T06:34:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
#!/usr/bin/env python
from setuptools import setup, find_packages, Extension
gamma_c = Extension('gamma', sources = ['gamma_index/gamma.c'])
options = dict(
name='gamma_index',
version='0.1.3',
packages=find_packages(),
license='MIT',
include_package_data = True,
description='gamma_index - calculation of gamma index on multi-dimensional distributions',
long_description=open('README.rst').read(),
author='Jan Pipek',
author_email='jan.pipek@gmail.com',
url='https://github.com/janpipek/gamma_index',
install_requires = ['numpy'],
ext_modules = [gamma_c]
)
setup(**options)
|
[
"jan.pipek@gmail.com"
] |
jan.pipek@gmail.com
|
841412bbb38497468d17c4c073b67c60d60b2d67
|
48aa5cc42d4af35470a4ba0545dd55a0812986c7
|
/mth5/groups/reports.py
|
96261296b15466d3312fe39f881f1daee7142de8
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
kujaku11/mth5
|
4fb6e156bd93c13b558d69b866025a29abe8bae7
|
ce814702c7116f5f0034f5d43a1392f61a2c3cd5
|
refs/heads/master
| 2023-08-03T17:27:37.074071
| 2023-04-18T00:22:27
| 2023-04-18T00:22:27
| 283,883,448
| 16
| 2
|
MIT
| 2023-09-12T23:44:45
| 2020-07-30T21:49:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,607
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 17:03:53 2020
:copyright:
Jared Peacock (jpeacock@usgs.gov)
:license: MIT
"""
# =============================================================================
# Imports
# =============================================================================
import numpy as np
import h5py
from mth5.groups.base import BaseGroup
# =============================================================================
# Reports Group
# =============================================================================
class ReportsGroup(BaseGroup):
"""
Not sure how to handle this yet
"""
def __init__(self, group, **kwargs):
super().__init__(group, **kwargs)
# summary of reports
self._defaults_summary_attrs = {
"name": "summary",
"max_shape": (1000,),
"dtype": np.dtype(
[
("name", "S5"),
("type", "S32"),
("summary", "S200"),
("hdf5_reference", h5py.ref_dtype),
]
),
}
def add_report(self, report_name, report_metadata=None, report_data=None):
"""
:param report_name: DESCRIPTION
:type report_name: TYPE
:param report_metadata: DESCRIPTION, defaults to None
:type report_metadata: TYPE, optional
:param report_data: DESCRIPTION, defaults to None
:type report_data: TYPE, optional
:return: DESCRIPTION
:rtype: TYPE
"""
self.logger.error("Not Implemented yet")
|
[
"peacock.jared@gmail.com"
] |
peacock.jared@gmail.com
|
71df871980ae414b88f015be80bace9bc42fcd93
|
51b630da92fe715af6d0b8d156c3492faa93a9c2
|
/task.py
|
26b8a23956729efd3ebde74fd98833c67773e71e
|
[
"MIT"
] |
permissive
|
rezer0dai/rewheeler
|
54dc12941e65b291388875197491c248468f8c8a
|
548e5784999657199f1bc51d5a8b94d12cb27fce
|
refs/heads/master
| 2020-06-03T00:15:27.327197
| 2019-06-18T09:09:30
| 2019-06-18T09:09:30
| 191,357,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,758
|
py
|
import numpy as np
import random
CLOSE_ENOUGH = 1.15
def extract_goal(state):
return state[-4-3:-1-3]
# https://github.com/Unity-Technologies/ml-agents/blob/master/UnitySDK/Assets/ML-Agents/Examples/Reacher/Scripts/ReacherAgent.cs
def transform(obs):
return np.hstack([
obs[3+4+3+3:3+4+3+3+3], #pendulumB position
obs[:3+4+3+3], # pendulumA info
obs[3+4+3+3+3:-4-3], # pundulumB rest of info
obs[-1-3:] #speed + hand position
])
def goal_distance(goal_a, goal_b):
# assert goal_a.shape == goal_b.shape
return np.linalg.norm(goal_a[:3] - goal_b[:3])
def fun_reward(s, n, goal, her): # 3D navigation
return (
-.01 * (5 * CLOSE_ENOUGH < np.abs(s[0] - goal[0])),
-.01 * (5 * CLOSE_ENOUGH < np.abs(s[1] - goal[1])),
-.01 * (5 * CLOSE_ENOUGH < np.abs(s[2] - goal[2])),
+.01 * (3 * CLOSE_ENOUGH > np.abs(s[0] - goal[0])),
+.01 * (3 * CLOSE_ENOUGH > np.abs(s[1] - goal[1])),
+.01 * (3 * CLOSE_ENOUGH > np.abs(s[2] - goal[2])),
-.01 * (1 * CLOSE_ENOUGH > np.abs(s[0] - goal[0])),
-.01 * (1 * CLOSE_ENOUGH > np.abs(s[1] - goal[1])),
-.01 * (1 * CLOSE_ENOUGH > np.abs(s[2] - goal[2])),
)
def goal_select(s, trajectory, gid):
return random.randint(0, len(trajectory)-1)
def update_goal_curyctor(n_step):
MAX_HER_STEP = 1
def update_goal(rewards, goals, states, n_goals, n_states, update, n_steps):
gid = 0
delta = 0
for i, (g, s, n_g, n, u, step) in enumerate(zip(goals, states, n_goals, n_states, update, n_steps)):
her_active = bool(sum(update[(i-MAX_HER_STEP) if MAX_HER_STEP < i else 0:i]))
if not her_active and u: # only here we do HER approach and setuping new goal
# last n-steps are by design *NOT selected* to replay anyway
gid = goal_select(s, goals[:-n_step-MAX_HER_STEP], 0)
delta = 0
if her_active or u:
if gid>=0 and gid+delta+n_step<len(goals) and i<len(states)-n_step: # actually HER goal was assigned
assert step is not None, "step is none ... {} {} {} {}".format(i, gid, delta, len(states))# 1 11 0 50
g, n_g = goals[gid+delta], goals[gid+delta+step]
delta += 1
yield (
fun_reward(s, n, g, True),
g, s,
n_g, n,
gid<0 or gid+delta+MAX_HER_STEP<len(goals)-n_step
)
return update_goal
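# --- Editorial note: update_goal_curyctor() above implements a Hindsight
# Experience Replay (HER)-style relabeling: transitions that missed the
# original goal are replayed against a substitute goal drawn from the same
# trajectory (goal_select), and fun_reward(..., her=True) rescores them, so
# near-misses still produce useful learning signal.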
# TEMPORARY IMPLMENTATION ~ testing on Tennis environment from UnityML framework
class Task:
def __init__(self):
from unityagents import UnityEnvironment
self.ENV = UnityEnvironment(file_name='./reach/Reacher.x86_64')
# self.ENV = UnityEnvironment(file_name='./data/Tennis.x86_64')
self.BRAIN_NAME = self.ENV.brain_names[0]
self.random_cut = None
def reset(self, seed, learn_mode):
# einfo = self.ENV.reset(config={"goal_size":4.4 * CLOSE_ENOUGH, "goal_speed":.3})[self.BRAIN_NAME]
einfo = self.ENV.reset()[self.BRAIN_NAME]
self.random_cut = random.randint(0, len(einfo.vector_observations) - 1)
states = self._reflow(einfo.vector_observations)
self._decouple(states)
return self.states
def _reflow(self, data):
return np.vstack([ data[self.random_cut:], data[:self.random_cut] ])
def _deflow(self, data):
return np.vstack([ data[-self.random_cut:], data[:-self.random_cut] ])
def _decouple(self, states):
self.goals, self.states = zip(
*[ (extract_goal(s), transform(s)) for s in states ])
self.goals = np.vstack(self.goals)
self.states = np.vstack(self.states)
def goal(self):
        return self.goals  # .reshape(len(self.goals), -1)
        # unreachable alternates left by the author (e.g. for the Tennis env):
        # return np.zeros([20, 1])
        # return np.zeros([2, 1])
def step(self, actions, learn_mode):
act_env = self._deflow(actions).reshape(-1)
einfo = self.ENV.step(act_env)[self.BRAIN_NAME]
states = self._reflow(einfo.vector_observations)
dones = self._reflow(np.asarray(einfo.local_done).reshape(len(states), -1))
rewards = self._reflow(np.asarray(einfo.rewards).reshape(len(states), -1))
goods = np.ones([len(rewards), 1])
self._decouple(states)
if not learn_mode:#True:#
return actions, self.states, rewards, dones, goods
rewards = np.vstack([
fun_reward(s, None, g, False) for i, (s, g) in enumerate(zip(self.states, self.goals))
])
return actions, self.states, rewards, dones, goods
def goal_met(self, rewards):
        return rewards > 30.  # score threshold for the Reacher task
        # return rewards > .5  # unreachable alternate (Tennis environment)
|
[
"aaa@bbb.ccc"
] |
aaa@bbb.ccc
|
3e4394a0646eda47d6f9ce72abd3a4216e14eb9e
|
9d24ead596fd7a4135b3ed2ed840a85ceef1cd56
|
/COVIDPandasUppgift/balls/new.py
|
07b66f9613e31b55fc20047c6eb620171a4789ec
|
[] |
no_license
|
blarpet/PRO-AXEL-JOBSON
|
656daaf2cceabc4916312fa43d3f5c050fdd513a
|
dd9a073cdc6e8ba0cd0237b0a25945c31528a53e
|
refs/heads/master
| 2023-05-08T19:56:44.803866
| 2021-05-31T11:29:07
| 2021-05-31T11:29:07
| 290,454,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
import pandas as pd
import plotly.express as px
df = pd.read_csv("National_Total_Deaths_by_Age_Group.csv")
options = []
for age in df.Age_Group:
options.append(dict(label = age, value = age))
fig = px.bar(df,x = "Age_Group", y = "Total_Cases")
fig.write_html('first_figure.html', auto_open=True)
print(df.head())
#df_AG = df[df["Age_Group"] == "0-9"]
#df_AG = df_AG.transpose().iloc[1:]
#print(df_AG)
|
[
"you@example.com"
] |
you@example.com
|
722d4642986e50ffbe82058abcf1195bc73e7946
|
5f51d41baa66867c48694e633d8ac1c757b385af
|
/0x00-python_variable_annotations/6-sum_mixed_list.py
|
95b9903a8bb6235692db2cb5b3bdc725c94ceaa4
|
[] |
no_license
|
JuanOlivares1/holbertonschool-web_back_end
|
cd7f53fbaffc837b5c569ce740542a0ef22d2363
|
43dee7a118424d8e0a12f4c2a7109f331ac73d5c
|
refs/heads/main
| 2023-08-20T14:16:28.028464
| 2021-10-07T22:55:33
| 2021-10-07T22:55:33
| 387,531,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
#!/usr/bin/env python3
""" Module """
from typing import List, Union
def sum_mixed_list(mxd_lst: List[Union[int, float]]) -> float:
""" Returns the sum of all list's items """
    total: float = 0  # renamed from `sum` to avoid shadowing the built-in
    for n in mxd_lst:
        total += n
    return total
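# Editorial usage sketch (commented out to keep the module side-effect free):
# print(sum_mixed_list([5, 4, 3.14]))  # -> 12.14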
|
[
"jdop2000@gmail.com"
] |
jdop2000@gmail.com
|
29a8e3fae3cc14190dd745fda358b6adbedd3235
|
28de04457e8ebcd1b34494db07bde8a3f25d8cf1
|
/easy/relative_ranks_506.py
|
dba84689bb3b4ada3bd233ad1d4f741db9778e21
|
[] |
no_license
|
YangXinNewlife/LeetCode
|
1df4218eef6b81db81bf2f0548d0a18bc9a5d672
|
20d3d0aa325d79c716acfc75daef32f8d4f9f1ad
|
refs/heads/master
| 2023-08-16T23:18:29.776539
| 2023-08-15T15:53:30
| 2023-08-15T15:53:30
| 70,552,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
# -*- coding:utf-8 -*-
__author__ = 'yangxin_ryan'
"""
Solution:
Sort the scores in descending order; the top three ranks map to:
Gold Medal
Silver Medal
Bronze Medal
and every later athlete is labeled with their rank (index + 1).
"""
from typing import List
class RelativeRanks(object):
def findRelativeRanks(self, nums: List[int]) -> List[str]:
pos = {n: i + 1 for i, n in enumerate(sorted(nums, reverse=True))}
def func(x):
if pos[x] == 1:
return "Gold Medal"
elif pos[x] == 2:
return "Silver Medal"
elif pos[x] == 3:
return "Bronze Medal"
else:
return str(pos[x])
        return list(map(func, nums))  # materialize so the List[str] annotation holds
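# Editorial usage sketch (commented out; scores are distinct, as the original
# LeetCode 506 statement guarantees):
# print(RelativeRanks().findRelativeRanks([5, 4, 3, 2, 1]))
# -> ['Gold Medal', 'Silver Medal', 'Bronze Medal', '4', '5']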
|
[
"yangxin03@youxin.com"
] |
yangxin03@youxin.com
|
ca3048e417708c69249c76c57c3e868c4c8ba729
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p00007/s006220863.py
|
cdaec1ff23fb635a4abf54da1e7866491a47bb12
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
from math import ceil
# 100,000 debt grows 5% per week, rounded up to the nearest 1,000 each week.
debt = 100000
for _ in range(int(input())):
    debt = int(ceil(debt * 1.05 / 1000) * 1000)
print(debt)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e64374bbafac7b8b97a0251bdbbd43bf9e2035b7
|
82aada4592fc4fc8dfd6822bd37a1f6c79ee53c0
|
/mainapp/daemons/vikidict/vikidict.py
|
5cf68458abd9c82cd9e1ab610b1a5ab63d1f4ae0
|
[] |
no_license
|
Ancelada/canonizator
|
db79e9c16cdafb981e58dd933c03460a16803f90
|
d5f14dddf3ed70dc8a0c10ecbb987fdf64eb682f
|
refs/heads/master
| 2021-01-23T00:56:58.045369
| 2017-06-05T19:06:52
| 2017-06-05T19:06:52
| 85,855,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,353
|
py
|
import sys
import os
from bs4 import BeautifulSoup
import requests
import pymorphy2
import binascii
# path = os.path.dirname(sys.modules[__name__].__file__)
# path = os.path.join(path, '..')
# sys.path.insert(0, path)
# __path__ = os.path.dirname(os.path.abspath(__file__))
class Vikidict():
def __init__(self):
self.headers = {
'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)',
}
self.path = os.path.dirname(os.path.abspath(__file__))
self.morth = pymorphy2.MorphAnalyzer()
def __page_soup(self, url):
page_content = self.__get_page_content(url)
return BeautifulSoup(page_content, 'lxml')
def __get_page_content(self, url):
return requests.get(url, headers=self.headers).text
def __get_synonim_list(self, page_soup):
h4 = page_soup.find_all('h4')
for tag in h4:
if tag.span.string == 'Синонимы':
h4 = tag
break
if len(h4) > 0:
try:
a_arr = h4.next_sibling.next_sibling.find_all('a')
synonims = []
for a in a_arr:
if a.get('title') != None:
print (a['title'])
synonims.append(a['title'])
return synonims
            except Exception:  # malformed page: no synonym list after the heading
return []
else:
return []
def parse_to_morph(self, word):
return self.morth.parse(word)[0]
def normalize_word(self, parsed_to_morph):
normal_form = parsed_to_morph.normal_form
return normal_form
def start(self, words):
result = []
for word in words:
POS = self.parse_to_morph(word.name).tag.POS
page_soup = self.__page_soup('https://ru.wiktionary.org/wiki/{0}'.format(word.name))
synonims = self.__get_synonim_list(page_soup)
synonims = self.__remove_different_pos(POS, synonims)
result.append({
'id': word.id,
'word': word.name,
'synonims': self.__convert_synonims(synonims)
})
return result
def __convert_synonims(self, synonims):
result = []
for synonim in synonims:
result.append({'synonim': synonim, 'crc32': self.__convert_crc32(synonim)})
return result
def __remove_different_pos(self, POS, synonims):
for key, value in enumerate(synonims):
value = self.parse_to_morph(value)
synonims[key] = value.normal_form
if value.tag.POS != POS:
del synonims[key]
return self.__remove_different_pos(POS, synonims)
return synonims
def __convert_crc32(self, value):
value_bytes=bytes(value, 'utf-8')
return binascii.crc32(value_bytes)
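# Editorial note (hedged): in Python 3 binascii.crc32 returns an unsigned
# 32-bit int, so __convert_crc32 above yields a stable numeric key per synonym:
# binascii.crc32(bytes('synonym', 'utf-8')) == binascii.crc32(b'synonym')  # True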
|
[
"danydonatto@pochta.ru"
] |
danydonatto@pochta.ru
|
e07b156b01c9f1e08b27c8f6e63b732cad71f565
|
4b7e282fe480415f5d52c0fc0429f144156190fe
|
/examples/account_management/reject_merchant_center_link.py
|
92971480f07660b2cd66bdfdfeeac1e2397d9eed
|
[
"Apache-2.0"
] |
permissive
|
Z2Xsoft/google-ads-python
|
c4750357bb19da91bb3b6bf2fa84bef9d2df36d3
|
1779d52a0446c8afb2437b0a9e103dcb849f5590
|
refs/heads/main
| 2023-08-18T15:22:17.840364
| 2021-09-26T04:08:53
| 2021-09-26T04:08:53
| 410,444,398
| 0
| 0
|
Apache-2.0
| 2021-09-26T04:08:53
| 2021-09-26T03:55:38
| null |
UTF-8
|
Python
| false
| false
| 6,207
|
py
|
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demonstrates how to reject or unlink a Merchant Center link request.
Prerequisite: You need to have access to a Merchant Center account. You can find
instructions to create a Merchant Center account here:
https://support.google.com/merchants/answer/188924.
To run this example, you must use the Merchant Center UI or the Content API for
Shopping to send a link request between your Merchant Center and Google Ads
accounts. You can find detailed instructions to link your Merchant Center and
Google Ads accounts here: https://support.google.com/merchants/answer/6159060.
"""
import argparse
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
def main(client, customer_id, merchant_center_account_id):
"""Demonstrates how to reject a Merchant Center link request.
Args:
client: An initialized Google Ads client.
customer_id: The Google Ads customer ID.
merchant_center_account_id: The Merchant Center account ID for the
account requesting to link.
"""
# Get the MerchantCenterLinkService client.
merchant_center_link_service = client.get_service(
"MerchantCenterLinkService"
)
# Get the extant customer account to Merchant Center account links.
list_merchant_center_links_response = merchant_center_link_service.list_merchant_center_links(
customer_id=customer_id
)
number_of_links = len(
list_merchant_center_links_response.merchant_center_links
)
if number_of_links == 0:
print(
"There are no current merchant center links to Google Ads "
f"account {customer_id}. This example will now exit."
)
return
print(
f"{number_of_links} Merchant Center link(s) found with the "
"following details:"
)
for (
merchant_center_link
) in list_merchant_center_links_response.merchant_center_links:
print(
f"\tLink '{merchant_center_link.resource_name}' has status "
f"'{merchant_center_link.status.name}'."
)
# Check if this is the link to the target Merchant Center account.
if merchant_center_link.id == merchant_center_account_id:
# A Merchant Center link can be pending or enabled; in both
# cases, we reject it by removing the link.
_remove_merchant_center_link(
client,
merchant_center_link_service,
customer_id,
merchant_center_link,
)
# We can terminate early since this example concerns only one
# Google Ads account to Merchant Center account link.
return
# Raise an exception if no matching Merchant Center link was found.
raise ValueError(
"No link could was found between Google Ads account "
f"{customer_id} and Merchant Center account "
f"{merchant_center_account_id}."
)
# [START reject_merchant_center_link]
def _remove_merchant_center_link(
client, merchant_center_link_service, customer_id, merchant_center_link
):
"""Removes a Merchant Center link from a Google Ads client customer account.
Args:
client: An initialized Google Ads client.
merchant_center_link_service: An initialized
MerchantCenterLinkService client.
customer_id: The Google Ads customer ID of the account that has the link
request.
merchant_center_link: The MerchantCenterLink object to remove.
"""
# Create a single remove operation, specifying the Merchant Center link
# resource name.
operation = client.get_type("MerchantCenterLinkOperation")
operation.remove = merchant_center_link.resource_name
# Send the operation in a mutate request.
response = merchant_center_link_service.mutate_merchant_center_link(
customer_id=customer_id, operation=operation
)
print(
"Removed Merchant Center link with resource name "
f"'{response.result.resource_name}'."
)
# [END reject_merchant_center_link]
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v8")
parser = argparse.ArgumentParser(
description=(
"Demonstrates how to reject a Merchant Center link request."
)
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-m",
"--merchant_center_account_id",
type=int,
required=True,
help="The Merchant Center account ID for the account requesting to "
"link.",
)
args = parser.parse_args()
try:
main(
googleads_client, args.customer_id, args.merchant_center_account_id
)
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
|
[
"noreply@github.com"
] |
Z2Xsoft.noreply@github.com
|
c80b24556a0be18f1c988d44226dd07ca53c1447
|
ab9196b6356e3c0af7baf7b768d7eb8112243c06
|
/Python&DataBase/5.21/HW02Pandas03_05_ClassEx05_김주현.py
|
72383d87fa38906966d5ecf55a4a45b989d9756b
|
[] |
no_license
|
wngus9056/Datascience
|
561188000df74686f42f216cda2b4e7ca3d8eeaf
|
a2edf645febd138531d4b953afcffa872ece469b
|
refs/heads/main
| 2023-07-01T00:08:00.642424
| 2021-08-07T02:10:25
| 2021-08-07T02:10:25
| 378,833,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 729
|
py
|
class FourCal:
def __init__(self,first,second):
self.first = first
self.second = second
def sum(self):
result = self.first + self.second
return result
def sub(self):
result = self.first - self.second
return result
def mul(self):
result = self.first * self.second
return result
def div(self):
result = self.first / self.second
return result
class MoreFourCal(FourCal):
def pow(self,su01):
result = su01 **2
return result
a = MoreFourCal(4,2)
print(a.first, '+', a.second, '=', a.sum())
print(a.first, '-', a.second, '=', a.sub())
print(a.first, '*', a.second, '=', a.mul())
print(a.first, '/', a.second, '=', a.div())
print('Square output :', a.pow(5))
|
[
"noreply@github.com"
] |
wngus9056.noreply@github.com
|
e8dc3a1f5f2a3fdeec63fe5fd2a749367511e8ab
|
5c2e0fe391f7c720d0a6c117a64f4c8e89fece93
|
/research/object_detection/models/ssd_mobilenet_v1_feature_extractor.py
|
e64c966d5f72e27875bfa64f49a6f3ce45694522
|
[
"Apache-2.0"
] |
permissive
|
lyltencent/tf_models_v15
|
e3bed9dfee42685118b0f3d21bb9de37d58cf500
|
0081dbe36831342051c09a2f94ef9ffa95da0e79
|
refs/heads/master
| 2022-10-20T20:00:26.594259
| 2020-09-19T05:37:22
| 2020-09-19T05:37:22
| 161,750,047
| 0
| 1
|
Apache-2.0
| 2021-03-31T21:04:01
| 2018-12-14T07:47:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,972
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV1 features."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import ops
from nets import mobilenet_v1
slim = tf.contrib.slim
class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV1 features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams,
batch_norm_trainable=True,
reuse_weights=None):
"""MobileNetV1 Feature Extractor for SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
batch_norm_trainable: Whether to update batch norm parameters during
training or not. When training with a small batch size
(e.g. 1), it is desirable to disable batch norm update and use
pretrained batch norm params.
reuse_weights: Whether to reuse variables. Default is None.
"""
super(SSDMobileNetV1FeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams, batch_norm_trainable, reuse_weights)
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
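    # Editor's check: 0 -> -1.0, 127.5 -> 0.0, 255 -> +1.0, i.e. [0, 255] is
    # mapped linearly onto [-1, 1] as the docstring above states.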
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
feature_map_layout = {
'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
'', ''],
'layer_depth': [-1, -1, 512, 256, 256, 128],
}
with tf.control_dependencies([shape_assert]):
with slim.arg_scope(self._conv_hyperparams):
with slim.arg_scope([slim.batch_norm], fused=False):
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
_, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
scope=scope)
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return feature_maps.values()
|
[
"yxl7245@eng-4150-nix03.main.ad.rit.edu"
] |
yxl7245@eng-4150-nix03.main.ad.rit.edu
|
5676800b14c8980248c0ab574043fba7b054977d
|
cacb2757d54aef112c43cc962b674582cbf1468e
|
/pumpp/core.py
|
0db13b0e93d5301aac60fc44e8c0b7a720099169
|
[
"ISC"
] |
permissive
|
justinsalamon/pumpp
|
9cf1ac6cf0dde1936b45c4d4c44728132a41d2b5
|
c8d7be644f998721a841cb43e28c8e285af225a4
|
refs/heads/master
| 2021-01-01T15:37:38.949477
| 2017-07-18T19:31:07
| 2017-07-18T19:31:07
| 97,657,947
| 2
| 0
| null | 2017-07-19T01:10:14
| 2017-07-19T01:10:14
| null |
UTF-8
|
Python
| false
| false
| 6,628
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
Core functionality
==================
.. autosummary::
:toctree: generated/
Pump
'''
import librosa
import jams
from .base import Slicer
from .exceptions import ParameterError
from .task import BaseTaskTransformer
from .feature import FeatureExtractor
from .sampler import Sampler
class Pump(Slicer):
'''Top-level pump object.
This class is used to collect feature and task transformers
Attributes
----------
ops : list of (BaseTaskTransformer, FeatureExtractor)
The operations to apply
Examples
--------
Create a CQT and chord transformer
>>> p_cqt = pumpp.feature.CQT('cqt', sr=44100, hop_length=1024)
>>> p_chord = pumpp.task.ChordTagTransformer(sr=44100, hop_length=1024)
>>> pump = pumpp.Pump(p_cqt, p_chord)
>>> data = pump.transform(audio_f='/my/audio/file.mp3',
... jam='/my/jams/annotation.jams')
Or use the call interface:
>>> data = pump(audio_f='/my/audio/file.mp3',
... jam='/my/jams/annotation.jams')
Or apply to audio in memory, and without existing annotations:
>>> y, sr = librosa.load('/my/audio/file.mp3')
>>> data = pump(y=y, sr=sr)
Access all the fields produced by this pump:
>>> pump.fields
{'chord/chord': Tensor(shape=(None, 170), dtype=<class 'bool'>),
'cqt/mag': Tensor(shape=(None, 288), dtype=<class 'numpy.float32'>),
'cqt/phase': Tensor(shape=(None, 288), dtype=<class 'numpy.float32'>)}
Access a constituent operator by name:
>>> pump['chord'].fields
{'chord/chord': Tensor(shape=(None, 170), dtype=<class 'bool'>)}
'''
def __init__(self, *ops):
self.ops = []
self.opmap = dict()
super(Pump, self).__init__(*ops)
def add(self, operator):
'''Add an operation to this pump.
Parameters
----------
operator : BaseTaskTransformer, FeatureExtractor
The operation to add
Raises
------
ParameterError
if `op` is not of a correct type
'''
if not isinstance(operator, (BaseTaskTransformer, FeatureExtractor)):
raise ParameterError('operator={} must be one of '
'(BaseTaskTransformer, FeatureExtractor)'
.format(operator))
if operator.name in self.opmap:
raise ParameterError('Duplicate operator name detected: '
'{}'.format(operator))
super(Pump, self).add(operator)
self.opmap[operator.name] = operator
self.ops.append(operator)
def transform(self, audio_f=None, jam=None, y=None, sr=None, crop=False):
'''Apply the transformations to an audio file, and optionally JAMS object.
Parameters
----------
audio_f : str
Path to audio file
jam : optional, `jams.JAMS`, str or file-like
Optional JAMS object/path to JAMS file/open file descriptor.
If provided, this will provide data for task transformers.
y : np.ndarray
sr : number > 0
If provided, operate directly on an existing audio buffer `y` at
sampling rate `sr` rather than load from `audio_f`.
crop : bool
If `True`, then data are cropped to a common time index across all
fields. Otherwise, data may have different time extents.
Returns
-------
data : dict
Data dictionary containing the transformed audio (and annotations)
Raises
------
ParameterError
At least one of `audio_f` or `(y, sr)` must be provided.
'''
if y is None:
if audio_f is None:
raise ParameterError('At least one of `y` or `audio_f` '
'must be provided')
# Load the audio
y, sr = librosa.load(audio_f, sr=sr, mono=True)
if sr is None:
raise ParameterError('If audio is provided as `y`, you must '
'specify the sampling rate as sr=')
if jam is None:
jam = jams.JAMS()
jam.file_metadata.duration = librosa.get_duration(y=y, sr=sr)
# Load the jams
if not isinstance(jam, jams.JAMS):
jam = jams.load(jam)
data = dict()
for operator in self.ops:
if isinstance(operator, BaseTaskTransformer):
data.update(operator.transform(jam))
elif isinstance(operator, FeatureExtractor):
data.update(operator.transform(y, sr))
if crop:
data = self.crop(data)
return data
def sampler(self, n_samples, duration, random_state=None):
'''Construct a sampler object for this pump's operators.
Parameters
----------
n_samples : None or int > 0
The number of samples to generate
duration : int > 0
The duration (in frames) of each sample patch
random_state : None, int, or np.random.RandomState
If int, random_state is the seed used by the random number
generator;
If RandomState instance, random_state is the random number
generator;
If None, the random number generator is the RandomState instance
used by np.random.
Returns
-------
sampler : pumpp.Sampler
The sampler object
See Also
--------
pumpp.sampler.Sampler
'''
return Sampler(n_samples, duration,
random_state=random_state,
*self.ops)
@property
def fields(self):
'''A dictionary of fields constructed by this pump'''
out = dict()
for operator in self.ops:
out.update(**operator.fields)
return out
def layers(self):
'''Construct Keras input layers for all feature transformers
in the pump.
Returns
-------
layers : {field: keras.layers.Input}
A dictionary of keras input layers, keyed by the corresponding
fields.
'''
layermap = dict()
for operator in self.ops:
if hasattr(operator, 'layers'):
layermap.update(operator.layers())
return layermap
def __getitem__(self, key):
return self.opmap.get(key)
def __call__(self, *args, **kwargs):
return self.transform(*args, **kwargs)
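
To illustrate the flow end to end, here is a hedged sketch chaining transform and sampler as the docstrings above describe; the file paths and patch sizes are placeholders, and applying the sampler directly to the data dict follows pumpp's documented sampler interface.

import pumpp

# Hedged end-to-end sketch; paths and sizes are placeholders.
p_cqt = pumpp.feature.CQT('cqt', sr=44100, hop_length=1024)
p_chord = pumpp.task.ChordTagTransformer(sr=44100, hop_length=1024)
pump = pumpp.Pump(p_cqt, p_chord)

# Full-track transformation, as in the class docstring.
data = pump(audio_f='/my/audio/file.mp3', jam='/my/jams/annotation.jams')

# Draw fixed-duration training patches from the transformed data;
# the sampler instance is assumed to be callable on the data dict.
sampler = pump.sampler(n_samples=4, duration=128, random_state=0)
for patch in sampler(data):
    print({key: value.shape for key, value in patch.items()})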
|
[
"brian.mcfee@nyu.edu"
] |
brian.mcfee@nyu.edu
|
82a3aa46b605e8f17a99a67cb9b5993e4cac0a60
|
2bc7659be83178c43b1592efbe1d79c62fc4fa36
|
/Python/1253 a부터 b까지 출력하기.py
|
5543b4bb8d3d0ac5509cef8ba4a12eff8b847690
|
[] |
no_license
|
KIMSUBIN17/Code-Up-Algorithm
|
ede6f443fcf640ecf58282c582da43e124ca44af
|
831180c28d234366a1d3cf118bd2a615dc404f00
|
refs/heads/master
| 2023-07-22T21:42:06.990542
| 2021-09-05T08:36:32
| 2021-09-05T08:36:32
| 286,932,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
# Read two integers and print every value between them, inclusive,
# in ascending order regardless of input order.
a, b = input().split()
a = int(a)
b = int(b)
if a > b:
    for i in range(b, a + 1):
        print(i, end=' ')
else:
    for i in range(a, b + 1):
        print(i, end=' ')
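
An equivalent hedged sketch: normalizing the order first removes the duplicated loop while producing the same output.

# Swap once so a single ascending loop covers both input orders.
a, b = map(int, input().split())
if a > b:
    a, b = b, a
for i in range(a, b + 1):
    print(i, end=' ')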
|
[
"tnqls24860@naver.com"
] |
tnqls24860@naver.com
|
dc5d59c9621d108d4954db1465d6a8e5fee0b977
|
68ee9027d4f780e1e5248a661ccf08427ff8d106
|
/extra/unused/LandsatPX_doc/get_landsat_pairs.py
|
a88d250ae60b9821006277c69db2c0e9ef8eebc0
|
[
"MIT"
] |
permissive
|
whyjz/CARST
|
87fb9a6a62d39fd742bb140bddcb95a2c15a144c
|
4fc48374f159e197fa5a9dbf8a867b0a8e0aad3b
|
refs/heads/master
| 2023-05-26T20:27:38.105623
| 2023-04-16T06:34:44
| 2023-04-16T06:34:44
| 58,771,687
| 17
| 4
|
MIT
| 2021-03-10T01:26:04
| 2016-05-13T20:54:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,628
|
py
|
import os
import glob
def makeParamFiles(A, B):
    # Year and day-of-year occupy fixed positions in the Landsat scene ID.
    dayA = A[13:16]
    yearA = A[9:13]
    dayB = B[13:16]
    yearB = B[9:13]
    # Pair file: absolute paths of the two scenes on a single line.
    file1 = open("landsat8_" + yearB + "_" + dayA + "_" + dayB + ".txt", "w")
    file1.write(os.path.realpath(A) + " " + os.path.realpath(B))
    file1.close()
file2 = open("params_landsat8_"+yearB+"_to_"+dayA+"_to_"+dayB+"_r32x32_s32x32.txt","w")
# Change these parameters for each section as needed
file2.write("UTM_ZONE = 40 \n\
UTM_LETTER = X \n\
BAND = B8 \n\
ICE = /13t1/wjd73/Glacier_outlines/central_alaska_range/central_alaska_range_utm_ice.gmt \n\
ROCK = /13t1/wjd73/Glacier_outlines/central_alaska_range/central_alaska_range_utm_rock.gmt \n\
IMAGE_DIR = /13t1/wjd73/Franz_Joseph/Landsat8/IMAGES\n\
METADATA_DIR = /13t1/wjd73/Franz_Joseph/Landsat8/IMAGES\n\
PAIRS_DIR = /13t1/wjd73/Franz_Joseph/Landsat8/Pairs\n\
PROCESSORS = 20\n\
RESOLUTION = 15\n\
SATELLITE = Landsat8\n\
SNR_CUTOFF = 0\n\
DEM = /13t1/wjd73/Franz_Joseph/DEM/FJLREGION_DEM.tif\n\
PREFILTER = False\n\
REF_X = 32\n\
REF_Y = 32\n\
SEARCH_X = 32\n\
SEARCH_Y = 32\n\
STEP = 8\n\
M_SCRIPTS_DIR = /13t1/wjd73/MATLAB/Adam_Cleaner\n\
VEL_MAX = 5\n\
TOL = 0.3\n\
NUMDIF = 3\n\
SCALE = 1500000\n\
PAIRS = /13t1/wjd73/Franz_Joseph/Landsat8/Pairs/"+file1.name+"\n")
file2.close()
file3 = open("px_landsat8_"+yearB+"_"+dayA+"_to_"+dayB+".cmd","w")
file3.write("python /home/wjd73/Python/landsatPX.py " + file2.name+"\n")
file3.close()
def daydiff(A, B):
    # Parse day-of-year and year from fixed positions in the scene ID.
    dayA = int(A[13:16])
    yearA = int(A[9:13])
    dayB = int(B[13:16])
    yearB = int(B[9:13])
    # Approximate day separation; assumes 365-day years, so it can drift
    # by one day across leap years.
    diff = dayB - (dayA - (yearB - yearA) * 365)
    return diff
###################################################
def main():
    # Pair each band-8 scene with its chronological successor.
    scenelist = glob.glob("*B8.TIF")
    scenelist.sort()
    for i in range(len(scenelist) - 1):
        A = scenelist[i]
        B = scenelist[i + 1]
        diff = daydiff(A, B)
        # Only generate parameter files for pairs 48 days apart or less.
        if diff <= 48:
            print(A + "\t" + B + "\t" + str(diff))
            makeParamFiles(A, B)

main()
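
The 365-day arithmetic in daydiff drifts across leap years; a hedged, calendar-exact variant (a new helper name, not in the original script) could build real dates from the year and day-of-year fields:

from datetime import datetime, timedelta

# Hypothetical leap-year-safe variant of daydiff (not in the original):
# construct actual dates, then subtract.
def daydiff_exact(A, B):
    dateA = datetime(int(A[9:13]), 1, 1) + timedelta(days=int(A[13:16]) - 1)
    dateB = datetime(int(B[9:13]), 1, 1) + timedelta(days=int(B[13:16]) - 1)
    return (dateB - dateA).days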
|
[
"wz278@cornell.edu"
] |
wz278@cornell.edu
|
5d52550b78b7a096a5f07042c2f7a901e73ada2b
|
d7ccb4225f623139995a7039f0981e89bf6365a4
|
/.history/accounts/views_20211013005944.py
|
b3778559b57bf3bf0425e1678a3cc6003745a10b
|
[] |
no_license
|
tonnymuchui/django-mall
|
64fd4abc3725c1bd0a3dcf20b93b490fe9307b37
|
55c083d8433be3c77adc61939cd197902de4ce76
|
refs/heads/master
| 2023-08-23T04:59:20.418732
| 2021-10-13T15:59:37
| 2021-10-13T15:59:37
| 415,668,388
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
from django.shortcuts import render

# Create your views here.
def register(request):
    # render() requires the incoming request as its first argument.
    return render(request, 'register.html')

def login(request):
    return render(request, 'register.html')

def log(request):
    return render(request, 'register.html')
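
These views only render a template; a hedged sketch of how they might be wired into a URLconf follows (the module layout and route names are assumptions, not part of the original project):

# Hypothetical urls.py for the views above; paths and names are assumed.
from django.urls import path
from . import views

urlpatterns = [
    path('register/', views.register, name='register'),
    path('login/', views.login, name='login'),
    path('log/', views.log, name='log'),
]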
|
[
"tonykanyingah@gmail.com"
] |
tonykanyingah@gmail.com
|
ac3cb9735acbbe7d612a9d4587f73eb38dc0804d
|
14af6e17a596aa340f7a1700b213dc5d41771043
|
/space/main.py
|
63584e9f3b4a359761c6cf892ffe4d17c1d144dd
|
[
"BSD-3-Clause"
] |
permissive
|
samdmarshall/space
|
56d54d0da7a503cc1678786d0c2430ad20ebd194
|
e9d9899d856c7c20d819e03357017dd07e1c8f23
|
refs/heads/master
| 2021-04-29T07:30:41.834581
| 2017-02-09T21:03:12
| 2017-02-09T21:03:12
| 77,950,055
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,477
|
py
|
# Copyright (c) 2017, Samantha Marshall (http://pewpewthespells.com)
# All rights reserved.
#
# https://github.com/samdmarshall/space
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of Samantha Marshall nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import argparse
from .version import __version__ as SPACE_VERSION
from .Logger import Logger
from . import Settings
from . import Executor
# Main
def main():
# setup the argument parsing
parser = argparse.ArgumentParser(description='space is a tool for managing workspaces')
parser.add_argument(
'--version',
help='Displays the version information',
action='version',
version=SPACE_VERSION,
)
parser.add_argument(
'--quiet',
help='Silences all logging output',
default=False,
action='store_true',
)
parser.add_argument(
'--verbose',
help='Adds verbosity to logging output',
default=False,
action='store_true',
)
parser.add_argument(
'--no-ansi',
help='Disables the ANSI color codes as part of the logger',
default=False,
action='store_true',
)
parser.add_argument(
'--debug',
help=argparse.SUPPRESS,
default=False,
action='store_true',
)
parser.add_argument(
'--list',
help='Displays a list of all available subcommands for the current working directory',
default=False,
action='store_true',
)
parser.add_argument(
'--edit',
help='Opens the space.yml file in your EDITOR',
default=False,
action='store_true',
)
parser.add_argument(
'--env',
help='Passes values into the environment you are working in',
action='store',
default='',
)
initial_arguments, remaining_args = parser.parse_known_args()
# perform the logging modifications before we do any other operations
Logger.disableANSI(initial_arguments.no_ansi)
Logger.enableDebugLogger(initial_arguments.debug)
Logger.isVerbose(initial_arguments.verbose)
Logger.isSilent(initial_arguments.quiet)
Logger.write().info('Loading the configuration for space...')
configuration = Settings.Configuration()
if initial_arguments.edit is True:
Logger.write().info('Launching in editor mode...')
if os.environ.get('EDITOR') is None:
Logger.write().critical('The value of EDITOR is not set, defaulting to nano...')
Logger.write().info('Opening the spaces.yml file in the default editor...')
Executor.Invoke((os.environ.get('EDITOR', 'nano'), configuration.get_preferences_path()))
else:
Logger.write().info('Validating configuration file...')
if configuration.is_valid() is False:
Logger.write().warning('No configuration setup for this directory!')
parser.exit(1, '')
Logger.write().info('Checking arguments...')
if initial_arguments.list is True:
message = '%s [-h] {%s}\n' % (parser.prog, '|'.join(configuration.commands()))
parser.exit(0, message)
Logger.write().info('Creating subcommand parser...')
subparsers = parser.add_subparsers(title='Subcommands', dest='command')
subparsers.required = True
Logger.write().info('Adding subcommands to command line parser...')
for command_name in configuration.commands():
Logger.write().debug('Adding command "%s"...' % command_name)
command_subparser = subparsers.add_parser(command_name)
Logger.write().info('Parsing remaining command line arguments...')
command_args = parser.parse_args(remaining_args)
Logger.write().info('Running subcommand...')
if configuration.invoke(initial_arguments.env, command_args.command) is False:
Logger.write().error('Unknown command "%s" was encountered!' % command_args.command)
parser.exit(1, '')
if __name__ == "__main__": # pragma: no cover
main()
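
The core trick above is the two-stage parse: global flags are read first via parse_known_args, subcommands are registered dynamically, and the leftover arguments are parsed again. A minimal standalone sketch of the same pattern, with made-up command names standing in for configuration.commands():

import argparse

# Minimal sketch of the two-stage argparse pattern; 'build'/'clean' are
# made-up stand-ins for dynamically discovered commands.
parser = argparse.ArgumentParser(prog='demo')
parser.add_argument('--verbose', default=False, action='store_true')
initial_args, remaining = parser.parse_known_args(['--verbose', 'build'])

subparsers = parser.add_subparsers(title='Subcommands', dest='command')
subparsers.required = True
for name in ('build', 'clean'):
    subparsers.add_parser(name)

args = parser.parse_args(remaining)
print(initial_args.verbose, args.command)  # True build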
|
[
"me@samdmarshall.com"
] |
me@samdmarshall.com
|