blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d4e3012192f130bacd64b59da908ea2b11713b9c | 177d7066f6a0326ed937a56174d7e2241653929a | /Array&String/lc868.py | eea41dedfac7e91ebc5f204e43c2ce47e6047543 | [] | no_license | jasonusaco/Leetcode-Practice | 276bcdb62b28806b3d297338882f4b1eef56cc13 | 91dc73202eb9952a6064013ef4ed20dfa4137c01 | refs/heads/master | 2020-07-06T08:29:09.419062 | 2019-10-10T01:43:03 | 2019-10-10T01:43:03 | 202,955,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | class Solution:
def binaryGap(self, N):
    """Return the longest distance between two adjacent 1-bits in the
    binary representation of N (LeetCode 868); 0 if N has fewer than
    two 1-bits."""
    bits = bin(N)[2:]
    # positions of every set bit, left to right
    ones = [pos for pos, ch in enumerate(bits) if ch == '1']
    longest = 0
    for prev, cur in zip(ones, ones[1:]):
        gap = cur - prev
        if gap > longest:
            longest = gap
    return longest
| [
"yangyx@raysdata.com"
] | yangyx@raysdata.com |
276fedffda29631158f74e9252d2a17f64efed33 | 0629fd0d3c1e43022cca1ac8ba4557469a59f8ac | /Python/soar_example.py | 9b643f9bbbc86df264f9739063464eae94c03bda | [] | no_license | Kanaderu/CPS-592_TankSoar_Project | d8b6a1aaf85c9845994a08c7075851025df60035 | f3c408771da4b721f234e387ace54b4345519bcb | refs/heads/master | 2020-04-05T20:15:40.341194 | 2018-11-25T17:03:25 | 2018-11-25T17:03:25 | 157,172,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,627 | py | #!/usr/bin/env python2.7
from itertools import product
from os import environ as env, fsync
from subprocess import call, check_output, CalledProcessError, STDOUT
import re
from lib import Python_sml_ClientInterface as sml
# low level Soar functions
def create_kernel():
    """Create an SML kernel in the current thread; exit(1) on failure."""
    kernel = sml.Kernel.CreateKernelInCurrentThread()
    if not kernel or kernel.HadError():
        # NOTE(review): if CreateKernelInCurrentThread returned None, the
        # call below raises AttributeError before exit(1) — confirm the API
        # always returns a kernel object on failure.
        print("Error creating kernel: " + kernel.GetLastErrorDescription())
        exit(1)
    return kernel
def create_agent(kernel, name):
    """Create a Soar agent called `name` on `kernel`; exit(1) on failure.

    Bug fix: the `name` parameter was previously ignored and the agent was
    always created with the hard-coded name "agent".
    """
    agent = kernel.CreateAgent(name)
    if not agent:
        print("Error creating agent: " + kernel.GetLastErrorDescription())
        exit(1)
    return agent
# mid-level framework
def cli(agent):
    """Minimal interactive Soar shell: run each command on `agent` until
    the user types "exit" or "quit"."""
    while True:
        command = raw_input("soar> ")
        if command in ("exit", "quit"):
            break
        if command:
            print(agent.ExecuteCommandLine(command).strip())
def parameterize_commands(param_map, commands):
    """Fill each command template in `commands` with values from `param_map`."""
    def _fill(template):
        return template.format(**param_map)
    return [_fill(command) for command in commands]
def run_parameterized_commands(agent, param_map, commands):
    """Run every parameterized command on `agent`.

    Fix: each result was previously bound to an unused local and discarded;
    results are now collected and returned (backward compatible — callers
    that ignored the old None return are unaffected).
    """
    results = []
    for cmd in parameterize_commands(param_map, commands):
        results.append(agent.ExecuteCommandLine(cmd))
    return results
def param_permutations(params):
    """Yield one dict per element of the cartesian product of the value
    lists in `params`, with keys taken in sorted order."""
    keys = sorted(params)
    value_lists = [params[key] for key in keys]
    for combo in product(*value_lists):
        yield dict(zip(keys, combo))
# IO
def parse_output_commands(agent, structure):
    """Collect the agent's output-link commands that appear in `structure`.

    structure maps command names to the parameter names of interest.
    Returns (commands, mapping): commands maps name -> {param: value},
    mapping maps name -> the raw SML command object. Commands whose name
    is not in `structure` are marked with AddStatusError().
    """
    commands = {}
    mapping = {}
    for cmd in range(0, agent.GetNumberCommands()):
        error = False
        command = agent.GetCommand(cmd)
        cmd_name = command.GetCommandName()
        if cmd_name in structure:
            parameters = {}
            for param_name in structure[cmd_name]:
                param_value = command.GetParameterValue(param_name)
                if param_value:
                    parameters[param_name] = param_value
            # NOTE(review): `error` can never be True on this branch, so the
            # guard below is dead; presumably it was meant to be set when a
            # required parameter is missing — confirm intent.
            if not error:
                commands[cmd_name] = parameters
                mapping[cmd_name] = command
        else:
            error = True
        if error:
            command.AddStatusError()
    return commands, mapping
def dot_to_input(edges):
    """Placeholder: convert DOT-style edges into agent input. Not implemented."""
    pass
# callback registry
def register_print_callback(kernel, agent, function, user_data=None):
    """Invoke `function` whenever the agent emits a print event."""
    agent.RegisterForPrintEvent(sml.smlEVENT_PRINT, function, user_data)
def register_output_callback(kernel, agent, function, user_data=None):
    """Invoke `function` after each of the agent's output phases."""
    agent.RegisterForRunEvent(sml.smlEVENT_AFTER_OUTPUT_PHASE, function, user_data)
def register_output_change_callback(kernel, agent, function, user_data=None):
    """Invoke `function` once all agents have generated output (kernel-level event)."""
    kernel.RegisterForUpdateEvent(sml.smlEVENT_AFTER_ALL_GENERATED_OUTPUT, function, user_data)
def register_destruction_callback(kernel, agent, function, user_data=None):
    """Invoke `function` when the agent halts."""
    agent.RegisterForRunEvent(sml.smlEVENT_AFTER_HALTED, function, user_data)
# callback functions
def callback_print_message(mid, user_data, agent, message):
    """Default print-event handler: echo the Soar message to stdout."""
    print(message.strip())
def print_report_row(mid, user_data, agent, *args):
    """Soar event callback: print one 'key=value ...' report line.

    `user_data` is the dict built by report_data_wrapper. The row is only
    printed when its `condition` predicate accepts (param_map, domain, agent).
    Each reporter is a callable returning a (label, value) pair.
    """
    condition = user_data["condition"]
    param_map = user_data["param_map"]
    domain = user_data["domain"]
    reporters = user_data["reporters"]
    if condition(param_map, domain, agent):
        pairs = []
        # fixed experiment parameters first, then one pair per reporter
        pairs.extend("=".join([k, str(v)]) for k, v in param_map.items())
        pairs.extend("{}={}".format(*reporter(param_map, domain, agent)) for reporter in reporters)
        print(" ".join(pairs))
def report_data_wrapper(param_map, domain, reporters, condition=None):
    """Bundle report inputs into the user-data dict expected by
    print_report_row; a missing condition defaults to "always print"."""
    def _always(param_map, domain, agent):
        return True
    chosen = condition if condition is not None else _always
    return dict(
        condition=chosen,
        param_map=param_map,
        domain=domain,
        reporters=reporters,
    )
# common reporters
def branch_name(param_map, domain, agent):
    """Reporter: ("branch", <target of the $HOME/SoarSuite/Core symlink>).

    Parses `ls -l` output by stripping everything up to the last space —
    assumes the path is a symlink and its target has no spaces.
    """
    result = re.sub(".* ", "", check_output(("ls", "-l", "{}/SoarSuite/Core".format(env["HOME"])))).strip()
    return "branch", result
def avg_decision_time(param_map, domain, agent):
    """Reporter: ("avg_dc_msec", <msec/decision>) scraped from the Soar
    `stats` output (see the sample dump below); value is a string."""
    result = re.sub(".*\((.*) msec/decision.*", r"\1", agent.ExecuteCommandLine("stats"), flags=re.DOTALL)
    return "avg_dc_msec", result
def max_decision_time(param_map, domain, agent):
    """Reporter: ("max_dc_msec", <max decision time in msec>).

    `stats -M` reports the maximum in seconds; converted to msec here.
    """
    result = re.sub(".* Time \(sec\) *([0-9.]*).*", r"\1", agent.ExecuteCommandLine("stats -M"), flags=re.DOTALL)
    return "max_dc_msec", float(result) * 1000
"""
stats
39952 decisions (1.172 msec/decision)
309943 elaboration cycles (7.758 ec's per dc, 0.151 msec/ec)
309943 inner elaboration cycles
136794 p-elaboration cycles (3.424 pe's per dc, 0.342 msec/pe)
391783 production firings (1.264 pf's per ec, 0.120 msec/pf)
3174729 wme changes (1587424 additions, 1587305 removals)
WM size: 119 current, 1497.991 mean, 18978 maximum
"""
"""
stats -M
Single decision cycle maximums:
Stat Value Cycle
---------------- ----------- -----------
Time (sec) 0.050799 11634
EpMem Time (sec) 0.000000 0
SMem Time (sec) 0.000000 0
WM changes 19096 11634
Firing count 792 11634
"""
# soar code management
def make_branch(branch):
    """Run the external `make-branch` tool for `branch`.

    Returns True on success, False when the tool exits non-zero. An OSError
    from a missing executable still propagates (unchanged behavior).
    Fix: removed the unused `stdout` and exception bindings.
    """
    try:
        check_output(("make-branch", branch), stderr=STDOUT)
        return True
    except CalledProcessError:
        return False
if __name__ == "__main__":
    # Interactive entry point: create a kernel/agent pair, echo Soar print
    # events to stdout, load the TankSoar agent, then drop into the CLI.
    kernel = create_kernel()
    agent = create_agent(kernel, "agent")
    register_print_callback(kernel, agent, callback_print_message, None)
    #for source in sys.argv[1:]:
    #    print(agent.ExecuteCommandLine("source " + source))
    print(agent.ExecuteCommandLine("source Project_TankSoar/tanksoar.soar"))
    cli(agent)
    # Tear down in reverse order of creation.
    kernel.DestroyAgent(agent)
    kernel.Shutdown()
    del kernel
| [
"fand.kanade@gmail.com"
] | fand.kanade@gmail.com |
696d4534570dce824e44730757a075a401e73fef | 75f5343fdde18a99373fdfafcc5eb1669d008479 | /tests/santa/test_configuration.py | 3761069d15c1069b586e0019d7a001640da128fb | [
"Apache-2.0"
] | permissive | BrandwatchLtd/zentral | 5a756bb7fc291bfd17eeff0eebcfe7a2867c5d91 | 4dfc96020ea04eb033369022bd1b1f289dc7e315 | refs/heads/master | 2022-11-17T08:58:09.262613 | 2020-07-15T10:55:31 | 2020-07-15T10:55:31 | 280,380,291 | 0 | 0 | null | 2020-07-17T09:13:48 | 2020-07-17T09:13:48 | null | UTF-8 | Python | false | false | 2,367 | py | from django.test import TestCase
from django.utils.crypto import get_random_string
from zentral.contrib.santa.models import Configuration
class SantaAPIViewsTestCase(TestCase):
    """Tests for santa Configuration serialization.

    Covers URL/prefix keys in the local config, and the default regex
    placeholders used when only one of blacklist/whitelist is set.
    NOTE(review): despite the name, these tests exercise the model, not
    API views — confirm whether a rename is wanted.
    """

    def test_local_configuration_url_keys(self):
        # Arbitrary values must round-trip into the plist-style local config.
        more_info_url = "https://{}.de".format(get_random_string(34))
        file_changes_prefix_filters = "/private/tmp/"
        config = Configuration.objects.create(name=get_random_string(256),
                                              more_info_url=more_info_url,
                                              file_changes_prefix_filters=file_changes_prefix_filters,
                                              enable_bad_signature_protection=True)
        local_config = config.get_local_config()
        self.assertEqual(local_config["MoreInfoURL"], more_info_url)
        self.assertEqual(local_config["FileChangesPrefixFilters"], file_changes_prefix_filters)
        self.assertEqual(local_config["EnableBadSignatureProtection"], True)

    def test_blacklist_regex_default_whitelist_regex(self):
        # Only blacklist set: local config omits the whitelist key entirely,
        # while the sync-server config fills it with a non-matching placeholder.
        blacklist_regex = get_random_string(34)
        config = Configuration.objects.create(name=get_random_string(256),
                                              blacklist_regex=blacklist_regex)
        local_config = config.get_local_config()
        self.assertEqual(local_config["BlacklistRegex"], blacklist_regex)
        self.assertTrue("WhitelistRegex" not in local_config)
        sync_server_config = config.get_sync_server_config()
        self.assertEqual(sync_server_config["blacklist_regex"], blacklist_regex)
        self.assertTrue(sync_server_config["whitelist_regex"].startswith("NON_MATCHING_PLACEHOLDER_"))

    def test_whitelist_regex_default_blacklist_regex(self):
        # Mirror case: only whitelist set.
        whitelist_regex = get_random_string(34)
        config = Configuration.objects.create(name=get_random_string(256),
                                              whitelist_regex=whitelist_regex)
        local_config = config.get_local_config()
        self.assertEqual(local_config["WhitelistRegex"], whitelist_regex)
        self.assertTrue("BlacklistRegex" not in local_config)
        sync_server_config = config.get_sync_server_config()
        self.assertEqual(sync_server_config["whitelist_regex"], whitelist_regex)
        self.assertTrue(sync_server_config["blacklist_regex"].startswith("NON_MATCHING_PLACEHOLDER_"))
| [
"eric.falconnier@112hz.com"
] | eric.falconnier@112hz.com |
cbfc8eed18ab289f6f73f49da45df553a87c1de3 | dbbdf35bff726681ae34ad08eeda5f30929e2ae9 | /unsupervised_learning/0x01-clustering/7-maximization.py | 626c470c16f6df35773088ad85137dd2057c934c | [] | no_license | jorgezafra94/holbertonschool-machine_learning | 0b7f61c954e5d64b1f91ec14c261527712243e98 | 8ad4c2594ff78b345dbd92e9d54d2a143ac4071a | refs/heads/master | 2023-02-03T20:19:36.544390 | 2020-12-21T21:49:10 | 2020-12-21T21:49:10 | 255,323,504 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | py | #!/usr/bin/env python3
"""
maximization step of GMM
"""
import numpy as np
def maximization(X, g):
    """
    M-step of the EM algorithm for a Gaussian Mixture Model.

    * X is a numpy.ndarray of shape (n, d) containing the data set
    * g is a numpy.ndarray of shape (k, n) containing the posterior
      probabilities for each data point in each cluster
    Returns: pi, m, S, or None, None, None on failure
    * pi is a numpy.ndarray of shape (k,) containing the updated
      priors for each cluster
    * m is a numpy.ndarray of shape (k, d) containing the updated
      centroid means for each cluster
    * S is a numpy.ndarray of shape (k, d, d) containing the updated
      covariance matrices for each cluster

    Fixes: no longer shadows the builtin `sum`, and the "posteriors sum
    to n" sanity check uses a float tolerance instead of int() truncation.
    """
    if type(X) is not np.ndarray or len(X.shape) != 2:
        return (None, None, None)
    if type(g) is not np.ndarray or len(g.shape) != 2:
        return (None, None, None)
    if X.shape[0] != g.shape[1]:
        return (None, None, None)
    # Per-point posteriors must sum to 1, so the grand total must equal n.
    total = np.sum(np.sum(g, axis=0))
    if not np.isclose(total, X.shape[0]):
        return (None, None, None)
    n, d = X.shape
    k, _ = g.shape
    # soft counts per cluster
    N_soft = np.sum(g, axis=1)
    pi = N_soft / n
    mean = np.zeros((k, d))
    cov = np.zeros((k, d, d))
    for clus in range(k):
        rik = g[clus]
        denom = N_soft[clus]
        # weighted mean of the points assigned (softly) to this cluster
        mean[clus] = np.matmul(rik, X) / denom
        # weight each centered point element-wise to keep (d, n), then
        # (d, n) @ (n, d) -> (d, d) covariance
        diff = X - mean[clus]
        cov[clus] = np.matmul(rik * diff.T, diff) / denom
    return (pi, mean, cov)
| [
"947@holbertonschool.com"
] | 947@holbertonschool.com |
ea4c1526be67546a9214f20d2e9b316de4de904d | 6b95338390c357a107712aa886b0d75e2a87f482 | /demos/S2Inner.py | 9ba02bbd0c84407603fb787b2588cc82f736bb7d | [
"MIT"
] | permissive | titos-carrasco/Scribbler2-Python | 238236e100741f96fa03d2d91b43f6a949371bf6 | 9cb5264fcd0dc85e1683a0d21ca5d550fec344e1 | refs/heads/master | 2021-10-28T07:09:40.509291 | 2021-10-14T01:59:12 | 2021-10-14T01:59:12 | 34,425,303 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test de los elementos internos del S2."""
import time
#from scribbler2.S2Serial import S2Serial
from scribbler2.S2Fluke2 import S2Fluke2
def main():
    """Exercise the S2 robot's internal-state API over a serial/Bluetooth link."""
    #robot = S2Serial( "/dev/ttyUSB0" )
    robot = S2Fluke2( "/dev/rfcomm2" )
    # read-only queries
    print( "getInfo : ", robot.getInfo() )
    print( "getAllSensors: ", robot.getAllSensors() )
    # write a password, read it back, then restore a second value
    print( "setPass : ", robot.setPass( "1234567898765432" ) )
    print( "getPass : ", robot.getPass() )
    print( "setPass : ", robot.setPass( "ABCDEFGHIJKLMNOP" ) )
    print( "getPass : ", robot.getPass() )
    # same round-trip for the robot name
    print( "setName : ", robot.setName( "NAME1234" ) )
    print( "getName : ", robot.getName() )
    print( "setName : ", robot.setName( "TitosBot" ) )
    print( "getName : ", robot.getName() )
    print( "getState : ", robot.getState() )
    # scratch data area: whole-buffer writes plus a single-byte update
    print( "setData : ", robot.setData( bytes( [ 8, 7, 6, 5, 4, 3, 2, 1 ] ) ) )
    print( "getData : ", bytes.hex( robot.getData() ) )
    print( "setSingleData: ", robot.setSingleData( 4, 255 ) )
    print( "getData : ", bytes.hex( robot.getData() ) )
    print( "setData : ", robot.setData( bytes( [ 1, 2, 3, 4, 5, 6, 7, 8 ] ) ) )
    print( "getData : ", bytes.hex( robot.getData() ) )
    robot.close()

if( __name__ == "__main__" ):
    main()
| [
"titos.carrasco@gmail.com"
] | titos.carrasco@gmail.com |
5e8a32c10154dcef0aec12a0c25bbf53e42efec6 | ae2fb5a4b3f7e504c8e76d11696eefce098b3d0e | /isovar/cli/isovar_main.py | b76c9a0f2f45712ea4aecaed07c1c37e5ed33709 | [
"Apache-2.0"
] | permissive | Saintyven/isovar | bd2eadd064d3b95696286e3f74fe2f87d79ef09b | 6e29348ab6f969b2a5d0417d50784af5900b82fe | refs/heads/master | 2022-04-04T08:38:31.094940 | 2019-10-24T15:58:19 | 2019-10-24T15:58:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py | # Copyright (c) 2019. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Primary Isovar command, used to collect information about variants,
the RNA reads which overlap and protein sequences which can be constructed
from reads that support the variant.
"""
from __future__ import print_function, division, absolute_import
import sys
from ..logging import get_logger
from ..dataframe_helpers import isovar_results_to_dataframe
from .main_args import run_isovar_from_parsed_args, make_isovar_arg_parser
from .output_args import add_output_args, write_dataframe
logger = get_logger(__name__)
def run(args=None):
    """CLI entry point: parse arguments, run Isovar, and write the results.

    args: optional argument list; defaults to sys.argv[1:] when None.
    """
    if args is None:
        args = sys.argv[1:]
    parser = make_isovar_arg_parser()
    # add --output options with a CSV default filename
    parser = add_output_args(
        parser,
        filename="isovar-results.csv")
    args = parser.parse_args(args)
    logger.info(args)
    isovar_results = run_isovar_from_parsed_args(args)
    df = isovar_results_to_dataframe(isovar_results)
    logger.info(df)
    write_dataframe(df, args)
| [
"alex.rubinsteyn@gmail.com"
] | alex.rubinsteyn@gmail.com |
c9b1e188c4c5c23513d1000a647e8acc2d911e48 | 81407be1385564308db7193634a2bb050b4f822e | /the-python-standard-library-by-example/datetime/datetime_time_resolution.py | 2756d71c1095a7fee840f94ad805d1aa5e2e7027 | [
"MIT"
] | permissive | gottaegbert/penter | 6db4f7d82c143af1209b4259ba32145aba7d6bd3 | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | refs/heads/master | 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 | MIT | 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null | UTF-8 | Python | false | false | 344 | py | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Time resolution.
"""
__version__ = "$Id$"
#end_pymotw_header
import datetime
# Python 2 demo: datetime.time only accepts whole microseconds, so the
# float values (0.1, 0.6) are expected to raise TypeError.
for m in [ 1, 0, 0.1, 0.6 ]:
    try:
        print '%02.1f :' % m, datetime.time(0, 0, 0, microsecond=m)
    except TypeError, err:
        print 'ERROR:', err
| [
"350840291@qq.com"
] | 350840291@qq.com |
9e613e5c2492c04c1f43db6ae750e6594de1561f | b5e4c4e3abb7f87bfd70ecd912810e2562cecdc5 | /section6/venv/Lib/site-packages/flask_jwt_extended/__init__.py | 931262ab70198da20fc600cd3744d800697a01dc | [] | no_license | chandshilpa/flaskapi | a89822707dc02f9c588af04f1f33f82a55b627b3 | 5f229d59d155e68e026566919d292c831ea00ed4 | refs/heads/master | 2022-12-09T10:59:14.563256 | 2019-01-08T17:33:46 | 2019-01-08T17:33:46 | 164,698,842 | 0 | 1 | null | 2022-12-07T16:24:53 | 2019-01-08T17:21:32 | Python | UTF-8 | Python | false | false | 671 | py | from .jwt_manager import JWTManager
from .view_decorators import (
fresh_jwt_required,
jwt_optional,
jwt_refresh_token_required,
jwt_required,
verify_fresh_jwt_in_request,
verify_jwt_in_request,
verify_jwt_in_request_optional,
verify_jwt_refresh_token_in_request,
)
from .utils import (
create_access_token,
create_refresh_token,
current_user,
decode_token,
get_csrf_token,
get_current_user,
get_jti,
get_jwt_claims,
get_jwt_identity,
get_raw_jwt,
set_access_cookies,
set_refresh_cookies,
unset_access_cookies,
unset_jwt_cookies,
unset_refresh_cookies,
)
__version__ = "3.13.1"
| [
"chandsandeep700@gmail.com"
] | chandsandeep700@gmail.com |
0b543a094a0055fa0410ceb17a2d9b02f84c9a10 | cd0987589d3815de1dea8529a7705caac479e7e9 | /webkit/WebKitTools/Scripts/webkitpy/layout_tests/port/google_chrome.py | d77266a204ab4eb2f1c21fa4b5eef67ed1568895 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | azrul2202/WebKit-Smartphone | 0aab1ff641d74f15c0623f00c56806dbc9b59fc1 | 023d6fe819445369134dee793b69de36748e71d7 | refs/heads/master | 2021-01-15T09:24:31.288774 | 2011-07-11T11:12:44 | 2011-07-11T11:12:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,651 | py | #!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def GetGoogleChromePort(port_name, options):
    """Some tests have slightly different results when compiled as Google
    Chrome vs Chromium. In those cases, we prepend an additional directory
    to the baseline paths.

    Fix: the four port branches were near-identical copies; the subclass
    construction is factored into one helper. Behavior and the returned
    port's class name are unchanged.
    """

    def _make_port(base_class, class_name, baseline_dir):
        # Build a port subclass that prepends the Google Chrome specific
        # baseline directory to the inherited search path.
        class GoogleChromePort(base_class):
            def baseline_search_path(self):
                paths = base_class.baseline_search_path(self)
                paths.insert(0, self._webkit_baseline_path(baseline_dir))
                return paths
        GoogleChromePort.__name__ = class_name
        return GoogleChromePort(None, options)

    if port_name == 'google-chrome-linux32':
        import chromium_linux
        return _make_port(chromium_linux.ChromiumLinuxPort,
                          'GoogleChromeLinux32Port', 'google-chrome-linux32')
    elif port_name == 'google-chrome-linux64':
        import chromium_linux
        return _make_port(chromium_linux.ChromiumLinuxPort,
                          'GoogleChromeLinux64Port', 'google-chrome-linux64')
    elif port_name.startswith('google-chrome-mac'):
        import chromium_mac
        return _make_port(chromium_mac.ChromiumMacPort,
                          'GoogleChromeMacPort', 'google-chrome-mac')
    elif port_name.startswith('google-chrome-win'):
        import chromium_win
        return _make_port(chromium_win.ChromiumWinPort,
                          'GoogleChromeWinPort', 'google-chrome-win')
    raise NotImplementedError('unsupported port: %s' % port_name)
| [
"sdevitt@rim.com"
] | sdevitt@rim.com |
101a28b6934b742d76e6b136b51043e90f5f8fd1 | a16100795f1e965c6084043599ca7c99417c2bc3 | /RealSenseSDK/PointCloud.py | 9dfa7a373445d810602c3cfb2e9e54d03b5cb6f1 | [
"MIT"
] | permissive | cutz-j/AR-project | 5f8d625bcd36ab3958f2d020a882035ffe296ddf | 50d4f407a4f2c42e12bf2bcd54c436df6fa3c9fa | refs/heads/master | 2020-05-25T20:12:57.421200 | 2019-06-21T09:56:14 | 2019-06-21T09:56:14 | 187,969,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | ### point cloud API ###
import pyrealsense2 as rs
import numpy as np  # NOTE(review): imported but unused here — confirm before removing
# Capture one depth+color frame pair from a RealSense camera and export
# the textured point cloud as a PLY file.
pc = rs.pointcloud()
points = rs.points()
pipeline = rs.pipeline()
pipeline.start()
try:
    frames = pipeline.wait_for_frames()
    depth = frames.get_depth_frame()
    color = frames.get_color_frame()
    # map the color frame onto the point cloud as its texture
    pc.map_to(color)
    points = pc.calculate(depth)
    points.export_to_ply("1.ply", color)
finally:
    # always release the camera, even if frame capture failed
    pipeline.stop()
"cutz309@gmail.com"
] | cutz309@gmail.com |
def voto(ano):
    """Classify voting status (Brazilian rules) for birth year `ano`.

    Returns 'VOTO FACULTATIVO.' (16-17 or 65+), 'VOTO OBRIGATÓRIO.'
    (18-64) or 'NÃO VOTA' (under 16), based on the current year.
    Fix: corrected the misspelled return string "FACULTAVIO" and dropped
    f-prefixes on strings with no placeholders.
    """
    from datetime import date
    idade = date.today().year - ano
    print(f'Com {idade} anos: ', end='')
    if 16 <= idade < 18 or idade >= 65:
        return 'VOTO FACULTATIVO.'
    elif 18 <= idade < 65:
        return 'VOTO OBRIGATÓRIO.'
    else:
        return 'NÃO VOTA'
# Prompt for a birth year and report the voting status.
anoNascimento = int(input('Em que ano você nasceu? '))
print(voto(anoNascimento))
| [
"nadirleneoliveira@yahoo.com"
] | nadirleneoliveira@yahoo.com |
5ea8a4aa18c53efa84fed48b0f5e63616cbd6945 | d305e9667f18127e4a1d4d65e5370cf60df30102 | /tests/st/ops/gpu/test_tensoradd.py | 01718814364f78d0985a4197311e43e63b9eaecd | [
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknow... | permissive | imyzx2017/mindspore_pcl | d8e5bd1f80458538d07ef0a8fc447b552bd87420 | f548c9dae106879d1a83377dd06b10d96427fd2d | refs/heads/master | 2023-01-13T22:28:42.064535 | 2020-11-18T11:15:41 | 2020-11-18T11:15:41 | 313,906,414 | 6 | 1 | Apache-2.0 | 2020-11-18T11:25:08 | 2020-11-18T10:57:26 | null | UTF-8 | Python | false | false | 4,455 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
context.set_context(device_target='GPU')
class TensroAdd(nn.Cell):
    """Cell applying P.TensorAdd to four parameter pairs of varying shapes:
    empty tensor, scalar broadcast, equal 4-D shapes, and 4-D broadcast.

    NOTE(review): the class name 'TensroAdd' is a typo for 'TensorAdd' but
    is referenced by the test below, so it is left unchanged.
    """
    def __init__(self):
        super(TensroAdd, self).__init__()
        self.add = P.TensorAdd()
        # (2, 0) + (2, 1): addition involving an empty tensor
        self.x = Parameter(initializer(
            Tensor(np.random.randn(2, 0).astype(np.float32)), [2, 0]), name='x')
        self.y = Parameter(initializer(
            Tensor(np.random.randn(2, 1).astype(np.float32)), [2, 1]), name='y')
        # (3,) + (1,): scalar broadcast
        self.x1 = Parameter(initializer(
            Tensor(np.arange(3).reshape(3).astype(np.float32)), [3]), name='x1')
        self.y1 = Parameter(initializer(
            Tensor(np.array([2]).astype(np.float32)), [1]), name='y1')
        # (3, 3, 3, 3) + (3, 3, 3, 3): same-shape elementwise add
        self.x2 = Parameter(initializer(
            Tensor(np.arange(3 * 3 * 3 * 3).reshape(3, 3, 3, 3).astype(np.float32)), [3, 3, 3, 3]), name='x2')
        self.y2 = Parameter(initializer(
            Tensor(np.arange(3 * 3 * 3 * 3).reshape(3, 3, 3, 3).astype(np.float32)), [3, 3, 3, 3]), name='y2')
        # (1, 1, 3, 3) + (3, 3, 3, 3): 4-D broadcast
        self.x3 = Parameter(initializer(
            Tensor(np.arange(1 * 1 * 3 * 3).reshape(1, 1, 3, 3).astype(np.float32)), [1, 1, 3, 3]), name='x3')
        self.y3 = Parameter(initializer(
            Tensor(np.arange(3 * 3 * 3 * 3).reshape(3, 3, 3, 3).astype(np.float32)), [3, 3, 3, 3]), name='y3')

    @ms_function
    def construct(self):
        # Return all four sums in one tuple so the test can check each case.
        return (
            self.add(self.x, self.y), self.add(self.x1, self.y1), self.add(self.x2, self.y2),
            self.add(self.x3, self.y3))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_TensorAdd():
    """Check TensroAdd outputs against hand-computed expectations:
    empty result, scalar broadcast, same-shape add, and 4-D broadcast."""
    add = TensroAdd()
    output = add()
    # (2, 0) + (2, 1) -> empty tensor
    expect0 = np.array([])
    # [0, 1, 2] + [2] -> [2, 3, 4]
    expect1 = np.array([2, 3, 4])
    # arange(81) + arange(81), reshaped (3, 3, 3, 3): elementwise doubling
    expect2 = np.array(
        [[[[0., 2., 4.],
           [6., 8., 10.],
           [12., 14., 16.]],
          [[18., 20., 22.],
           [24., 26., 28.],
           [30., 32., 34.]],
          [[36., 38., 40.],
           [42., 44., 46.],
           [48., 50., 52.]]],
         [[[54., 56., 58.],
           [60., 62., 64.],
           [66., 68., 70.]],
          [[72., 74., 76.],
           [78., 80., 82.],
           [84., 86., 88.]],
          [[90., 92., 94.],
           [96., 98., 100.],
           [102., 104., 106.]]],
         [[[108., 110., 112.],
           [114., 116., 118.],
           [120., 122., 124.]],
          [[126., 128., 130.],
           [132., 134., 136.],
           [138., 140., 142.]],
          [[144., 146., 148.],
           [150., 152., 154.],
           [156., 158., 160.]]]])
    # (1, 1, 3, 3) broadcast against (3, 3, 3, 3)
    expect3 = np.array(
        [[[[0., 2., 4.],
           [6., 8., 10.],
           [12., 14., 16.]],
          [[9., 11., 13.],
           [15., 17., 19.],
           [21., 23., 25.]],
          [[18., 20., 22.],
           [24., 26., 28.],
           [30., 32., 34.]]],
         [[[27., 29., 31.],
           [33., 35., 37.],
           [39., 41., 43.]],
          [[36., 38., 40.],
           [42., 44., 46.],
           [48., 50., 52.]],
          [[45., 47., 49.],
           [51., 53., 55.],
           [57., 59., 61.]]],
         [[[54., 56., 58.],
           [60., 62., 64.],
           [66., 68., 70.]],
          [[63., 65., 67.],
           [69., 71., 73.],
           [75., 77., 79.]],
          [[72., 74., 76.],
           [78., 80., 82.],
           [84., 86., 88.]]]]
    )
    assert (output[0].asnumpy() == expect0).all()
    assert (output[1].asnumpy() == expect1).all()
    assert (output[2].asnumpy() == expect2).all()
    assert (output[3].asnumpy() == expect3).all()
| [
"513344092@qq.com"
] | 513344092@qq.com |
d1094ebebfbee9395ef571ae2d01cecbd38179dc | cf6bfafdc3d897589373e3577b329cd96897d4e1 | /apps/operation/models.py | cfcb7cbe7808bfb1d3066bb20ef42218306f40ec | [] | no_license | ChenxiiCheng/Django-online-edu | b6911a348875d60fb50903b9a9d9296c115adbb5 | 2b2894703ce4711e562c5ad7a7474b811e8f8ba0 | refs/heads/master | 2022-12-12T09:18:41.241698 | 2019-09-08T22:06:48 | 2019-09-08T22:06:48 | 176,599,775 | 2 | 0 | null | 2022-11-22T03:29:32 | 2019-03-19T21:17:57 | Python | UTF-8 | Python | false | false | 2,439 | py | # _*_ encoding: utf-8 _*_
from __future__ import unicode_literals
from datetime import datetime
from django.db import models
from users.models import UserProfile
from courses.models import Course
# Create your models here.
class UserAsk(models.Model):
    """Course consultation request submitted by a visitor (no FK to a user)."""
    name = models.CharField(max_length=20, verbose_name=u"姓名")  # requester name
    mobile = models.CharField(max_length=11, verbose_name=u"手机")  # 11-digit mobile number
    course_name = models.CharField(max_length=50, verbose_name=u"课程名")  # requested course
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")

    class Meta:
        verbose_name = u"用户咨询"
        verbose_name_plural = verbose_name
class CourseComments(models.Model):
    """A user's comment on a course."""
    user = models.ForeignKey(UserProfile, verbose_name=u"用户")
    course = models.ForeignKey(Course, verbose_name=u"课程")
    comments = models.CharField(max_length=200, verbose_name=u"评论")  # comment text
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")

    class Meta:
        verbose_name = u"课程评论"
        verbose_name_plural = verbose_name
class UserFavorite(models.Model):
    """Generic favorite: fav_id points at a course, organization or teacher
    row depending on fav_type (no DB-level FK integrity)."""
    user = models.ForeignKey(UserProfile, verbose_name=u"用户")
    fav_id = models.IntegerField(default=0, verbose_name=u"数据id")  # pk of the favorited row
    # 1 = course, 2 = course organization, 3 = teacher
    fav_type = models.IntegerField(choices=((1, u"课程"), (2, u'课程机构'), (3, u'讲师')), default=1, verbose_name=u"收藏类型")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")

    class Meta:
        verbose_name = u"用户收藏"
        verbose_name_plural = verbose_name
class UserMessage(models.Model):
    """In-site message; user=0 conventionally means a broadcast message."""
    user = models.IntegerField(default=0, verbose_name=u"接收用户")  # recipient user id (not an FK)
    message = models.CharField(max_length=500, verbose_name=u"消息内容")
    has_read = models.BooleanField(default=False, verbose_name=u"是否已读")  # read flag
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")

    class Meta:
        verbose_name = u"用户消息"
        verbose_name_plural = verbose_name
class UserCourse(models.Model):
    """Association between a user and a course they are enrolled in."""
    user = models.ForeignKey(UserProfile, verbose_name=u"用户")
    course = models.ForeignKey(Course, verbose_name=u"课程")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")

    class Meta:
        verbose_name = u"用户课程"
        verbose_name_plural = verbose_name

    def __unicode__(self):
        # Bug fix: this model has no `name` field, so `return self.name`
        # raised AttributeError; render the relation instead.
        return u'{0}_{1}'.format(self.user, self.course)
| [
"chenxic1011@gmail.com"
] | chenxic1011@gmail.com |
f1c7a7587cc5690b153486597dd9c1be1b8956ba | c4bcb851c00d2830267b1997fa91d41e243b64c2 | /cluster_images.py | 35709d61e209942a6a2109e6c05762d9f84199a9 | [] | no_license | tjacek/cluster_images | 5d6a41114a4039b3bdedc34d872be4e6db3ba066 | 8c660c69658c64c6b9de66d6faa41c92486c24c5 | refs/heads/master | 2021-01-23T21:01:17.078036 | 2018-06-07T14:33:50 | 2018-06-07T14:33:50 | 44,881,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | import utils.imgs,utils.files,utils.conf
import select_imgs
import select_imgs.clustering
import select_imgs.tools
import numpy as np
import cv2
import utils.dirs
from preproc import select_extractor
def cluster_images(conf_path):
    """Cluster the images under conf_path['img_path'] with DBSCAN and save
    one directory per cluster under conf_path['cls_path'].

    conf_path: configuration dict (despite the name) with at least the
    keys 'img_path' and 'cls_path'.
    Bug fix: the feature extractor was built from the module-level global
    `conf_dict` (defined only in the __main__ guard) instead of this
    function's own parameter.
    """
    in_path = conf_path['img_path']
    out_path = conf_path['cls_path']
    data = utils.imgs.make_imgs(in_path, norm=True)
    extractor = select_extractor(conf_path)
    imgset = [extractor(img_i) for img_i in data]
    cls_alg = select_imgs.clustering.DbscanAlg()
    labels = cls_alg(imgset)
    # cluster on normalized images, but save the un-normalized originals
    unorm_data = utils.imgs.unorm(data)
    clusters = select_imgs.split_cls(labels, unorm_data)
    select_imgs.save_cls(out_path, clusters)
select_imgs.save_cls(out_path,clusters)
if __name__ == "__main__":
conf_path="conf/dataset3.cfg"
conf_dict=utils.conf.read_config(conf_path)
cluster_images(conf_dict) | [
"tjacek@student.agh.edu.pl"
] | tjacek@student.agh.edu.pl |
130ffc81e3401c1ab8dfab39d856f564ee030543 | ab9196b6356e3c0af7baf7b768d7eb8112243c06 | /Python&DataBase/5.20/HW02Pandas02_15_JoConfirmStep02_김주현.py | c85ff712a6b2d518456ae9073763738c5970a386 | [] | no_license | wngus9056/Datascience | 561188000df74686f42f216cda2b4e7ca3d8eeaf | a2edf645febd138531d4b953afcffa872ece469b | refs/heads/main | 2023-07-01T00:08:00.642424 | 2021-08-07T02:10:25 | 2021-08-07T02:10:25 | 378,833,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py |
import random
while True:
user_name = []
number = []
user_name = input('4인 이상의 이름을 입력하세요 : ').split()
if len(user_name) < 4:
print('^ 명수를 확인하세요.')
continue
elif len(user_name) >= 4:
while True:
num = random.randint(1,len(user_name))
if num not in number:
number.append(num)
else:
continue
if len(number) == len(user_name):
break
for i in user_name:
print(i, end=' ')
print()
for i in number:
print(i, end='\t')
print()
'''
def def_num4():
while True:
num = random.randint(1, len(a))
if num not in b:
b.append(num)
else:
continue
'''
'''
def def_under4():
print('^ 명수를 확인하세요 ( 4인 이상 )')
def def_upper4():
while True:
number = random.randint(1, len(data_input))
if number not in b:
b.append(number)
else:
continue
if len(str(b)) == len(str(number)):
break
def def_bone():
if len(data_input) < 4:
def_under4()
elif len(data_input) >= 4:
def_upper4()
num_check = []
b = []
while True:
data_input = input('4인 이상의 이름을 입력하세요 (스페이스바로 구분) : ').split( )
def_bone()
print(data_input)
print(b)
'''
'''
number = []
while len(number) <3:
num = random.randint(1,9)
if num not in number:
number.append(num)
print(number)
'''
| [
"noreply@github.com"
] | wngus9056.noreply@github.com |
ec1ac6043b0b3334fd1d49469b9b8af4b9d71759 | c1eb69dc5dc5b83d987d1bda0bd74a2d7d912fdf | /core/migrations/0019_set_contact_email.py | e89fc8ac2a99c28523f168f9793f3b9850b731af | [
"MIT"
] | permissive | CIGIHub/opencanada | 47c4e9268343aaaf0fe06b62c1838871968a0b87 | 6334ff412addc0562ac247080194e5d182e8e924 | refs/heads/staging | 2023-05-07T16:02:35.915344 | 2021-05-26T18:10:09 | 2021-05-26T18:10:09 | 36,510,047 | 8 | 2 | MIT | 2020-07-06T14:22:09 | 2015-05-29T14:43:28 | Python | UTF-8 | Python | false | false | 559 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def set_email(apps, schema_editor):
    """Populate ``SiteDefaults.contact_email`` with the default address."""
    site_defaults_model = apps.get_model('core', 'SiteDefaults')
    for settings_row in site_defaults_model.objects.all():
        settings_row.contact_email = "info@opencanada.org"
        settings_row.save()
class Migration(migrations.Migration):
    """Data migration: fill in the default contact email via set_email."""
    dependencies = [
        ('core', '0018_auto_20151002_2248'),
    ]
    operations = [
        # Forward-only; no reverse function is supplied.
        migrations.RunPython(set_email),
    ]
| [
"csimpson@cigionline.org"
] | csimpson@cigionline.org |
817af0ffbe1077dcda4a8022e6a061efe8ff6905 | d68a5968315ac482ed2cf334c054c685f0d34694 | /PostgreSQLConnection.py | f6c3de681bb6443a772ba7411d953af11d33f1a5 | [] | no_license | stixaw/PythonScripts | 2aad71992d275cd5835715e51f970370a756aebc | 04d0f8f538f76a82ae711de3db80c5e626d95cf1 | refs/heads/master | 2021-04-28T12:57:55.642316 | 2020-10-08T16:02:22 | 2020-10-08T16:02:22 | 122,090,737 | 0 | 0 | null | 2018-02-20T19:18:39 | 2018-02-19T16:37:19 | Python | UTF-8 | Python | false | false | 686 | py | import psycopg2
def connPostgreSQL():
    """Open and return a psycopg2 connection to the local 'dvdtwo' database.

    On failure the connection error is reported and re-raised.  (The
    original used a bare ``except`` and then fell through to
    ``return conn``, which crashed with UnboundLocalError and hid the
    real cause.)

    NOTE(review): credentials are hard-coded here; they belong in
    environment variables or a config file, not in source control.
    """
    try:
        conn = psycopg2.connect(
            database="dvdtwo",
            user="postgres",
            password="p@ssword",
            host="127.0.0.1",
            port="5432",
        )
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C is not swallowed;
        # propagate the real error instead of a later UnboundLocalError.
        print("Connection failed")
        raise
    print("Opened connection successfully")
    return conn
if __name__ == "__main__":
    # Smoke test: list all actors from the dvdrental-style database.
    con = connPostgreSQL()
    cur = con.cursor()
    cur.execute("SELECT actor_id, first_name, last_name from actor")
    rows = cur.fetchall()
    for row in rows:
        # BUG FIX: the ID line was missing its {} placeholder and always
        # printed the literal text "ID = " with no value.
        print("ID = {}".format(row[0]))
        print("FirstName = {}".format(row[1]))
        print("LastName = {}. \n".format(row[2]))
    print("Operation done successfully")
    con.close()
"noreply@github.com"
] | stixaw.noreply@github.com |
b84026150a3cf4aeffc7ecda6f05ba4987ddbaa1 | fe8c1ce22f2a8dac1abdba5d51732b6307468614 | /djimix/people/departments.py | 1f5a4d839ad3dba8e470377dd41861dcbb8f6c9c | [
"MIT"
] | permissive | carthage-college/django-djimix | 27a6697d118632c1737eec3f03189caa8e2b255a | 69b3a08498fd4ed055a3808c8bf41a3a8590b33a | refs/heads/master | 2023-03-04T04:58:44.178564 | 2023-02-24T17:57:41 | 2023-02-24T17:57:41 | 196,456,967 | 0 | 0 | MIT | 2023-02-24T17:58:22 | 2019-07-11T19:55:52 | Python | UTF-8 | Python | false | false | 3,885 | py | from django.conf import settings
from django.core.cache import cache
from djimix.core.database import get_connection, xsql
from djimix.sql.departments import (
ACADEMIC_DEPARTMENTS, ALL_DEPARTMENTS,
DEPARTMENT_FACULTY, DEPARTMENT_DIVISION_CHAIRS,
FACULTY_DEPTS, PERSON_DEPARTMENTS, STAFF_DEPTS,
)
from collections import OrderedDict
def department(code):
    """Return the department row for the given three-letter HR code, or None."""
    # NOTE(review): `code` is interpolated straight into the SQL string;
    # fine for trusted codes, but parameterize if it can be user input.
    result = xsql("{0} AND hrdept = '{1}' ORDER BY DESCR".format(ALL_DEPARTMENTS, code))
    try:
        return result.fetchone()
    except AttributeError:
        # xsql did not hand back a cursor-like object.
        return None
def departments_all_choices():
    """Return (code, name) tuples for use as a form/model ``choices`` list."""
    faculty_rows = xsql(FACULTY_DEPTS)
    staff_rows = xsql(STAFF_DEPTS)
    choices = [
        ('', '---Choose Your Department---'),
        ('', '---Faculty Departments---'),
    ]
    if faculty_rows:
        choices.extend(
            (row.pcn_03.strip(), row.department.strip()) for row in faculty_rows
        )
    choices.append(('', '---Staff Deparments---'))
    if staff_rows:
        choices.extend(
            (row.hrdept.strip(), row.department.strip()) for row in staff_rows
        )
    choices.append(('CCFI', 'College Culture for Incl'))
    return choices
def academic_department(did):
    """Return the academic-department row for the given department ID, or None."""
    result = xsql("{0} AND dept_table.dept = '{1}'".format(ACADEMIC_DEPARTMENTS, did))
    try:
        return result.fetchone()
    except AttributeError:
        # xsql did not hand back a cursor-like object.
        return None
def person_departments(cid):
    """Return (code, department) tuples for every department the person is in."""
    result = xsql(PERSON_DEPARTMENTS(college_id=cid))
    return [
        (row.code.strip(), row.department.strip())
        for row in result.fetchall()
    ]
def chair_departments(cid):
    """Returns all departments with which a chair/dean is associated.

    Returns a 4-tuple ({'depts': depts}, dc, div_name, div_code) where
    dc is 'dean' or 'chair' depending on which query matched.
    """
    depts = OrderedDict()
    # Shared SELECT: active, web-visible departments joined to their division.
    base = """
    SELECT
        dept_table.dept as dept_code, dept_table.txt as dept_name,
        dept_table.div as div_code, div_table.txt as div_name
    FROM
        dept_table
    INNER JOIN
        div_table ON dept_table.div = div_table.div
    WHERE
        CURRENT BETWEEN
            dept_table.active_date
        AND
            NVL(dept_table.inactive_date, CURRENT)
        AND
            dept_table.web_display = "Y"
    """
    # First query: is this person the head of a division (a dean)?
    sql = """
    {0}
    AND
        div_table.head_id={1}
    ORDER BY
        dept_table.txt
    """.format(base, cid)
    rows = xsql(sql).fetchall()
    if rows:
        # division dean
        dc = 'dean'
    else:
        # department chair
        dc = 'chair'
    # NOTE(review): this second query runs unconditionally and overwrites
    # `rows` — a dean who heads no individual department ends up in the
    # `else` branch below and the dean result is discarded.  Confirm
    # whether that is intended.
    sql = """
    {0}
    AND
        dept_table.head_id={1}
    AND
        dept_table.dept != ("_ESN")
    ORDER BY
        dept_table.txt
    """.format(base, cid)
    rows = xsql(sql).fetchall()
    if rows:
        for row in rows:
            depts[(row.dept_code)] = {
                'dept_name': row.dept_name,
                'dept_code': row.dept_code,
                'div_name': row.div_name,
                'div_code': row.div_code,
            }
        # NOTE(review): `row` here is the last row of the loop — this
        # presumably assumes all departments share one division; verify.
        return ({'depts': depts}, dc, row.div_name, row.div_code)
    else:
        return ({'depts': depts}, None, None, None)
def department_division_chairs(where):
    """Return chair/dean profile rows matching ``where``, or None on no result set."""
    result = xsql(DEPARTMENT_DIVISION_CHAIRS(where=where))
    try:
        return result.fetchall()
    except AttributeError:
        # xsql did not hand back a cursor-like object.
        return None
def department_faculty(code, year):
    """Return faculty rows for the department given its code and a year, or None."""
    result = xsql(DEPARTMENT_FACULTY(year=year, dept=code))
    try:
        return result.fetchall()
    except AttributeError:
        # xsql did not hand back a cursor-like object.
        return None
"plungerman@gmail.com"
] | plungerman@gmail.com |
087cbf47038b4d8d2209b39286d9880293cb0319 | 226a6846a5f607d49cbd1069554bb6cf7fd19e4e | /castpage/urls.py | 7ca24505a53c9fc77d07303c13aa6213a2c9852b | [
"MIT"
] | permissive | flyinactor91/Rocky-Rollcall | 8b699b9798e09771c8c42dccd1ef5e66799077d0 | ab2bdbbd5f5920e709a09d1b1182a388955211d9 | refs/heads/master | 2018-12-19T11:05:00.645811 | 2018-11-26T20:46:43 | 2018-11-26T20:46:43 | 120,975,249 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | """
Castpage URL patterns
"""
from django.urls import path
from castpage import views
# Common prefix: every cast-specific route below starts with the cast slug.
_s = '<slug:slug>/'
urlpatterns = [
    path('new', views.cast_new, name='cast_new'),
    path('<slug:slug>', views.cast_home, name='cast_home'),
    path(_s+'events', views.CastEvents.as_view(), name='cast_events'),
    path(_s+'members', views.CastMembers.as_view(), name='cast_members'),
    path(_s+'members/join', views.request_to_join, name='cast_member_join'),
    path(_s+'members/leave', views.leave_cast, name='cast_member_leave'),
    path(_s+'photos', views.CastPhotos.as_view(), name='cast_photos'),
    path(_s+'photos/<int:pk>', views.cast_photo_detail, name='cast_photo_detail'),
]
| [
"michael@mdupont.com"
] | michael@mdupont.com |
440c0fcde2199962b0ea0cab69120ec84aaad8e8 | ec56f8b58b2f0fb0161d43b0ee5017e37ec19168 | /data/gerar_model.py | 84bf3237686216a578dc795aa00320c17d96b041 | [] | no_license | oturing/django-ibge | 3d423dd87c4a32438c6223d2b395f3706394c882 | 483415104a38b7af337cf865d677fbe57149c5be | refs/heads/master | 2021-01-17T06:01:36.373512 | 2012-05-31T14:10:28 | 2012-05-31T14:10:28 | 5,974,268 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 872 | py |
import pickle
from analisar_dbf import Campo
# Load the field descriptions (Campo objects) produced by analisar_dbf.
campos = pickle.load(open('campos.pickle'))
# Header of the generated Django model module ("CABECALHO" = header).
CABECALHO = '''\
from django.db import models
class Localidade(models.Model):'''
print CABECALHO
# Emit one Django model field per DBF column.
# NOTE(review): Python 2 only (print statement and the `unicode` builtin).
for campo in campos:
    if campo.nome in ['id', 'gmrotation']:
        continue # skip these fields
    params = [('db_index', True)]
    # Text columns become CharField; everything else becomes DecimalField.
    if campo.tipo is unicode:
        model_field ='CharField'
        params.append(('max_length', campo.larg))
        if campo.blank:
            params.append(('blank', True))
    else:
        model_field ='DecimalField'
        params.append(('max_digits', campo.larg))
        params.append(('decimal_places', campo.decimais))
        if campo.null:
            params.append(('null', True))
    params = ', '.join([nome+'='+repr(valor) for nome, valor in params])
    print ' {} = models.{}({})'.format(campo.nome, model_field, params)
"luciano@ramalho.org"
] | luciano@ramalho.org |
d1815db80aef29624e780261e5d6d57873523529 | 79f4a01edbb21797e10f9cb9445a040cc02031d6 | /exercise/test0822/test.py | 177a49dc07cf6252eef1baca82a69f955f6c91de | [] | no_license | sangmain/ai_study | 28c8f35c4b9fe0041623ca6f84c735841e8c5410 | 1468465ca9afd17aa252302d51785d2ad46f77df | refs/heads/master | 2020-06-25T22:49:21.275481 | 2019-09-25T00:45:01 | 2019-09-25T00:45:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | import numpy as np
# Toy sequence-completion exercise: each 11-slot input has a gap of None
# values; the model is trained to predict the 3 missing middle values.
# NOTE(review): the None entries make np.array produce dtype=object
# arrays — confirm Keras actually accepts these as LSTM input.
x_train = [
    [1,2,3,4,5,None, None, None, 9, 10, 11],
    [2,3,4,5,6, None, None, None, 10, 11, 12],
    [50,51,52,53,54, None, None, None, 58, 59, 60]]
y_train = [[6, 7, 8], [7,8,9], [55, 56, 57]]
x_test = [[35,36,37,38,39, None, None, None, 43, 44, 45]]
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
# Reshape to (samples, timesteps=11, features=1) as the LSTM expects.
x_train = x_train.reshape(-1, 11, 1)
x_test = x_test.reshape(-1, 11, 1)
from keras.models import Sequential
from keras.layers import LSTM, Dense
model = Sequential()
model.add(LSTM(50, input_shape = (11,1), activation='relu'))
# model.add(Dense(10, activation='relu'))
# model.add(Dense(512, activation='relu'))
# model.add(Dense(32, activation='relu'))
# model.add(Dense(16, activation='relu'))
model.add(Dense(3, activation='relu'))
model.compile(optimizer='adam', loss='mse', metrics=['mse'])
model.fit(x_train, y_train, epochs=30, batch_size=1, verbose=2)
pred = model.predict(x_test, batch_size=1)
print(pred)
"sangmin99554@naver.com"
] | sangmin99554@naver.com |
2538c820a3db075d877932f6f53496dd54d351ff | a5b563bc83fce5a652a01dd3b07550cb53e54ea3 | /NLP/hw09_solution.py | 2a851fa0d9acb3d36e894c12a123ca948812cdff | [] | no_license | ljc19800331/HW_Python | d89b38ced5476453f5a0ae98752c7cd34bf351c0 | 08a503b57f48e306bb3898f3c31785144b96e0cb | refs/heads/master | 2020-03-27T06:42:09.504267 | 2018-11-28T21:32:40 | 2018-11-28T21:32:40 | 146,128,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,272 | py | import nltk
from nltk.corpus import brown
from collections import Counter
from collections import defaultdict
import numpy as np
from scipy import spatial
import heapq
import operator
def get_tf(training_category):
    """Build the term-sentence count matrix for a Brown corpus category.

    Returns a dict mapping each word of the category to a numpy vector
    whose i-th entry is the number of occurrences of the word in the
    i-th sentence of that category.
    """
    # BUG FIX: the parameter used to be overwritten with the hard-coded
    # value ['reviews'], so the caller's category was silently ignored
    # (and a redundant brown.sents() call was made and discarded).
    sents = brown.sents(categories=training_category)
    tokens = brown.words(categories=training_category)
    word_dict = {}
    for word in Counter(tokens):
        counts = np.zeros(len(sents))
        for sent_index in range(len(sents)):
            counts[sent_index] = sents[sent_index].count(word)
        word_dict[word] = counts
    return word_dict
def get_idf(Dict_mat, word):
    """Return the inverse document frequency of `word`.

    ``Dict_mat`` maps words to equal-length count vectors (one entry per
    sentence/"document"); idf = log(N_docs / N_docs_containing_word).
    """
    # Works on both Python 2 and 3; the original Dict_mat.items()[0]
    # breaks on Python 3, where items() is a non-subscriptable view.
    N_doc = len(next(iter(Dict_mat.values())))
    vec_word = Dict_mat[word]
    N_idf = sum(1 for count in vec_word if count > 0)
    # True division: Python-2 integer floor division here would silently
    # truncate the ratio and corrupt the idf weight.
    return np.log(float(N_doc) / N_idf)
def get_tfidf(Dict_mat, word_interest, word_obj):
    """Return the tf-idf vector of ``word_obj``.

    ``word_interest`` is unused; it is kept for signature compatibility
    with the existing callers.
    """
    return Dict_mat[word_obj] * get_idf(Dict_mat, word_obj)
def get_cos(Dict_mat, word_interest, word_obj):
    """Cosine similarity between the tf-idf vectors of the two words."""
    u = get_tfidf(Dict_mat, word_interest, word_interest)
    v = get_tfidf(Dict_mat, word_interest, word_obj)
    return 1 - spatial.distance.cosine(u, v)
def related_words(word_interest, n):
    """Return the `n` words most similar (by tf-idf cosine) to `word_interest`.

    Loads a pre-computed term-sentence matrix from disk.
    NOTE(review): the absolute path only exists on the author's machine;
    it should be a parameter or configuration value.
    """
    Dict_mat = np.load('/home/maguangshen/PycharmProjects/HW_Python/Dict_Train_review.npy').item()
    # Similarity of every vocabulary word to the word of interest.
    Dict_cos = {}
    for idx, item in enumerate(Dict_mat):
        if idx % 1000 == 0:
            print(idx)  # progress indicator over a large vocabulary
        Dict_cos[item] = get_cos(Dict_mat, word_interest, item)
    # Highest similarity first.
    Dict_sorted = sorted(Dict_cos.items(), key=operator.itemgetter(1), reverse=True)
    # BUG FIX: `n` was previously ignored and the entire sorted
    # vocabulary was returned; keep only the top-n words as documented.
    List_words = [str(word) for word, _ in Dict_sorted[:n]]
    print('The list of related words is ', List_words)
    return List_words
if __name__ == "__main__":
    # Demo: the 10 words most related to 'play' in the reviews matrix.
    word = 'play'
    n = 10
    related_words(word, n)
| [
"maguangshen@gmail.com"
] | maguangshen@gmail.com |
aeef4dd6a80bdeda9629ce16b48ec061db566b4e | 807022b4aebd9c4b2e8b0f5b7c209cf21c697381 | /ocr_structuring/core/utils/extract_charges/row.py | 965b8aa187f50f7f80e4f24e48367f2b85adb24e | [] | no_license | imfifc/myocr | 4abc5480222f4828072857fbb84236f4a494b851 | 71ba56084aabfa8b07ddc1842bcac5cdbd71212c | refs/heads/master | 2022-12-13T13:04:51.488913 | 2020-09-02T09:07:09 | 2020-09-02T09:07:09 | 292,232,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | from .classifier import ContentClassifier
class Row:
    """One row of OCR labels plus per-class position indices.

    ``names`` / ``amounts`` / ``quantities`` hold label positions and are
    populated by :meth:`stat`.
    """

    def __init__(self, labels):
        self.labels = labels
        self.names = []
        self.amounts = []
        self.quantities = []

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, i):
        return self.labels[i]

    def __str__(self):
        return str(self.labels)

    __repr__ = __str__

    def sort(self, key):
        self.labels.sort(key=key)

    def append(self, label):
        self.labels.append(label)

    def stat(self):
        """Record, per content class, the positions of this row's labels."""
        buckets = {
            ContentClassifier.NAME: self.names,
            ContentClassifier.AMOUNT: self.amounts,
            ContentClassifier.QUANTITY: self.quantities,
        }
        for idx, label in enumerate(self.labels):
            bucket = buckets.get(label.content_classifier)
            if bucket is not None:
                bucket.append(idx)
| [
"1193690571@qq.com"
] | 1193690571@qq.com |
f435657d504d329ce57f523c62af0e888cff7846 | 482c670a1885d4058909d817b1b20904eedb09c6 | /python_web_auto/zuoye/BBS_001_postpage.py | c833e15fab0146627e65635c2174d174db602933 | [] | no_license | qiaoxu1123/python-auto | 13bddeafd3832dc9c28f39ab9fa2343d23fd3dd0 | 15bfc1aaedbbdf7a00f03cd3552ed8e7b30eabdc | refs/heads/master | 2020-04-02T16:10:15.862150 | 2018-11-04T07:27:51 | 2018-11-04T07:27:51 | 154,601,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | from pageobjects.base import BasePage
from selenium.webdriver.common.by import By
import time
class PostPage(BasePage):
    """Page object for the forum's quick-post form (locators + actions)."""
    # Post title input field.
    post_page_input_title_loc = (By.NAME, 'subject')
    # Post body ("neirong" = content) input field.
    post_page_input_neirong_loc = (By.ID, 'fastpostmessage')
    # "Publish post" ("fabiao") button.
    post_page_button_fabiao_loc = (By.CSS_SELECTOR, '#fastpostsubmit strong')
    # Posting actions.
    def post1(self, title):
        """Type the post title into the subject field."""
        self.sendkeys(title, *self.post_page_input_title_loc)
        time.sleep(2)
    def post2(self, neirong):
        """Type the post body and click the publish button."""
        self.sendkeys(neirong, *self.post_page_input_neirong_loc)
        time.sleep(2)
        self.click(*self.post_page_button_fabiao_loc)
        time.sleep(2)
"you@example.com"
] | you@example.com |
f4060636d462c80c2307eb30017d339f36c3ecb7 | 7d1100d44207ac14df50e3f49a99acf1fa996528 | /run_experiments_crawl_batch_size.py | 54e1187c27911eca06e16defe28fff9f0b97b194 | [] | no_license | Tribler/trustchain-simulator-pysim | 09b07cc21c5495212024a9a1f6b131a7cc7f7356 | bf7a72545a7353d2d03a4d9129c291c8a5204088 | refs/heads/master | 2023-03-28T07:07:39.802436 | 2021-03-22T14:23:06 | 2021-03-22T14:23:06 | 288,008,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | from multiprocessing.context import Process
from simpy import Environment
from chainsim.settings import SimulationSettings
from chainsim.simulation import TrustChainSimulation
def run(settings):
    """Run one TrustChain simulation with the given settings (in this process)."""
    env = Environment()
    simulation = TrustChainSimulation(settings, env)
    simulation.run()
if __name__ == "__main__":
    # For each exchange strategy, sweep crawl_batch_size 1..10 with one
    # simulation per child process, and wait for the whole sweep to
    # finish before moving on to the next strategy.
    for strategy in [0, 1, 2, 3]:
        processes = []
        for crawl_batch_size in range(1, 11):
            settings = SimulationSettings()
            settings.peers = 1000
            settings.crawl_batch_size = crawl_batch_size
            settings.exchange_strategy = strategy
            p = Process(target=run, args=(settings,))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
        print("Fully evaluated strategy %d!" % strategy)
"mdmartijndevos@gmail.com"
] | mdmartijndevos@gmail.com |
243a8404e4029008f3ed83b6c3e5561f188738f8 | 5f22ddbd3eeb99709e43e7b9a7958c9987c7efa4 | /interview_bits/level_2/02_binary_search/04_sort_modification/02_median-of-array.py | ab6a2a7a309107255a263a9c35ce8f03a2d03ae7 | [] | no_license | salvador-dali/algorithms_general | 04950bd823fc354adc58a4f23b7d2f3d39664798 | aeee3356e2488c6fab08741b1ac26e8bd5e4ac0d | refs/heads/master | 2020-12-14T06:24:10.466601 | 2016-07-17T06:00:17 | 2016-07-17T06:00:17 | 47,397,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | arr1 = [1, 4, 5]
arr2 = [2, 3, 3, 4]


def median2(a, b):
    """Median of two values."""
    return (a + b) / 2.


def median3(a, b, c):
    """Median of three values."""
    return sorted([a, b, c])[1]


def median4(a, b, c, d):
    """Median of four values (mean of the two middle ones)."""
    tmp = sorted([a, b, c, d])
    return (tmp[1] + tmp[2]) / 2.


def findMedianUtil(A, N, B, M):
    """Median of the first N elements of sorted A and first M of sorted B.

    Requires N <= M.  Recursively discards halves that cannot contain
    the median.  BUG FIX: the original translated C pointer arithmetic
    literally (`A + idxA` on a Python list raises TypeError); list
    slicing is used instead, and Python-2 integer `/` is replaced with
    `//` so the code works on both Python 2 and 3.
    """
    # Base case: A exhausted -> plain median of the B prefix.
    if N == 0:
        if M % 2:
            return B[M // 2]
        return median2(B[M // 2 - 1], B[M // 2])
    if N == 1:
        if M == 1:
            return median2(A[0], B[0])
        if M % 2:
            return median2(B[M // 2], median3(A[0], B[M // 2 - 1], B[M // 2 + 1]))
        return median3(B[M // 2], B[M // 2 - 1], A[0])
    if N == 2:
        if M == 2:
            return median4(A[0], A[1], B[0], B[1])
        if M % 2:
            return median3(B[M // 2], max(A[0], B[M // 2 - 1]), min(A[1], B[M // 2 + 1]))
        return median4(B[M // 2], B[M // 2 - 1], max(A[0], B[M // 2 - 2]), min(A[1], B[M // 2 + 1]))
    idxA, idxB = (N - 1) // 2, (M - 1) // 2
    if A[idxA] <= B[idxB]:
        # Median cannot lie in A[:idxA] nor in B[M - idxA:].
        return findMedianUtil(A[idxA:], N - idxA, B[:M - idxA], M - idxA)
    # Median cannot lie in A[N - idxA:] nor in B[:idxA].
    return findMedianUtil(A[:N - idxA], N - idxA, B[idxA:], M - idxA)


def findMedian(arr1, arr2):
    """Median of the union of two sorted lists (either may be empty)."""
    if len(arr1) > len(arr2):
        return findMedianUtil(arr2, len(arr2), arr1, len(arr1))
    return findMedianUtil(arr1, len(arr1), arr2, len(arr2))


print(findMedian([], arr2))
"dmytro@knowlabs.com"
] | dmytro@knowlabs.com |
a533f41096fc2311071c9d2b0730cea7ec89c9c0 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5688567749672960_1/Python/CuteCube/main.py | a9c97f12149c22ac7839f1632376963a5b5b356b | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | #!/usr/bin/env python
def main():
    """Read Code-Jam style input from input.txt and print one line per case.

    NOTE(review): Python 2 only (`xrange` and the print statement below).
    """
    f = open('input.txt', 'r')
    total_T = int(f.readline())
    for T in xrange(1,total_T+1):
        N = int(f.readline().rstrip('\n'))
        # print find_speed(M)
        print 'Case #{}: {}'.format(T, resolve(N))
def resolve(N):
    """Minimum number of presses to count up to N when a press either adds
    one or reverses the decimal digits (presumably Code Jam 2015
    "Counter Culture" — TODO confirm).

    NOTE(review): Python 2 only — relies on `xrange` and integer `/`.
    """
    n_str = [x for x in str(N)]
    # print n_str
    # One digit left: N presses of +1 reach it directly.
    if len(n_str) == 1:
        return N
    # Trailing zero: step back one first (a reverse would drop digits).
    if n_str[-1] == '0':
        return 1 + resolve(N - 1)
    # Numbers of the form 1 0...0 1: step back two before recursing.
    if all(x == '0' for x in n_str[1:-1]) and n_str[0] == '1' and n_str[-1] == '1':
        # print 2
        return 2 + resolve(N - 2)
    # Zero the right half and set the last digit to 1; `c` counts the +1
    # presses from that constructed value up to N...
    for x in xrange(len(n_str)/2, len(n_str)):
        n_str[x] = '0'
    n_str[-1] = '1'
    c = N - int(''.join(n_str))
    # ...plus one extra press if the constructed number isn't a palindrome
    # (the reverse is then a distinct press).
    if any(n_str[x]!=n_str[len(n_str)-x-1] for x in xrange(len(n_str)/2)):
        c += 1
    return c + resolve(int(''.join(reversed(n_str))))
def find_speed(M):
    """Return the largest one-step drop M[x] - M[x+1] in the sequence.

    Returns 0 when the sequence never decreases or has fewer than two
    elements (matching the original's initial v = 0).
    """
    # PORTABILITY FIX: the original used Python-2-only `xrange`; `range`
    # yields identical results and works on Python 2 and 3 alike.
    best = 0
    for i in range(len(M) - 1):
        drop = M[i] - M[i + 1]
        if drop > best:
            best = drop
    return best
if __name__ == '__main__':
    # Entry point when run as a script.
    main()
"eewestman@gmail.com"
] | eewestman@gmail.com |
f7566929bf81ce7f554a012545d6233b231f2e4c | 79732bc2534f8ba4b6cf4cf05caf3270b35d6384 | /miseq_portal/minion_uploader/models.py | b062b5f1072b00cbd0d65cf6575f3047bdfa3b3b | [
"MIT"
] | permissive | BFSSI-Bioinformatics-Lab/miseq_portal | 15e5f46caf985246315815b848c01737a255eacd | a70ad378c093bbbfa54645e511fe76280e115859 | refs/heads/master | 2023-06-10T20:38:55.361444 | 2022-03-03T15:38:20 | 2022-03-03T15:38:20 | 143,023,632 | 0 | 0 | MIT | 2023-05-25T17:06:17 | 2018-07-31T14:13:15 | Python | UTF-8 | Python | false | false | 278 | py | from chunked_upload.models import ChunkedUpload
# 'ChunkedUpload' class provides almost everything for you.
# if you need to tweak it little further, create a model class
# by inheriting "chunked_upload.models.AbstractChunkedUpload" class
# Alias used as this app's concrete chunked-upload model (no extra fields).
ZippedMinIONRunUpload = ChunkedUpload
| [
"forest.dussault@canada.ca"
] | forest.dussault@canada.ca |
64b0846893130a46190ac9a705ebc9a75cb718de | fd25231975acd147e04dc3ed3627c92cb1a4f86c | /FlaskAPI/vir_env/lib/python3.7/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py | f9f8fd2d4c5e28133e22737a8b09951bcb2968a8 | [] | no_license | sumitkutty/Flight-Price-Prediction | 832a2802a3367e655b46d3b44f073d917abd2320 | d974a8b75fbcbfa42f11703602af3e45a3f08b3c | refs/heads/master | 2022-12-25T07:13:06.375888 | 2020-10-08T18:46:44 | 2020-10-08T18:46:44 | 302,366,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:bd2fc56e80fc6d351161ebebeb5183299b8aacd69a54064626b1df6e4e5331c9
size 29989
| [
"sumitkutty37@gmail.com"
] | sumitkutty37@gmail.com |
ae96c95eac692a1e7964bf6801acd1cdc6fd7b4e | 933f3d61b6cbb9d3b3a6dde701fbb48981e49ce8 | /callback/optimizater/ralars.py | 5ed529186ee45b55a7ea1fa2326c24a378317175 | [
"MIT"
] | permissive | lonePatient/BERT-NER-Pytorch | 81a898bf90fab28f7574bab6ea7257ff52650bc4 | 7c938a2ded28d8d379fef1879e07da08df5a1eb8 | refs/heads/master | 2023-03-17T13:27:04.139577 | 2023-03-11T03:14:55 | 2023-03-11T03:14:55 | 170,256,148 | 1,928 | 428 | MIT | 2022-07-18T08:17:59 | 2019-02-12T05:12:07 | Python | UTF-8 | Python | false | false | 5,124 | py | import math
import torch
from torch.optim.optimizer import Optimizer
class RaLars(Optimizer):
    """Implements the RAdam optimizer from https://arxiv.org/pdf/1908.03265.pdf
    with optional Layer-wise adaptive Scaling from https://arxiv.org/pdf/1708.03888.pdf
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): learning rate
        betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        scale_clip (float, optional): the maximal upper bound for the scale factor of LARS
    Example:
        >>> model = ResNet()
        >>> optimizer = RaLars(model.parameters(), lr=0.001)
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0,
                 scale_clip=None):
        # Validate hyper-parameters before handing them to the base class.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(RaLars, self).__init__(params, defaults)
        # LARS arguments
        # Default clamp range for the layer norm used in the LARS scaling.
        self.scale_clip = scale_clip
        if self.scale_clip is None:
            self.scale_clip = (0, 10)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            # Get group-shared variables
            beta1, beta2 = group['betas']
            sma_inf = group.get('sma_inf')
            # Compute max length of SMA on first step
            if not isinstance(sma_inf, float):
                group['sma_inf'] = 2 / (1 - beta2) - 1
                sma_inf = group.get('sma_inf')
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                state['step'] += 1
                # Decay the first and second moment running average coefficient
                # NOTE(review): the positional-alpha Tensor.add_/addcmul_
                # signatures used below are deprecated in newer PyTorch
                # releases — confirm the targeted torch version.
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                # Bias correction
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # Compute length of SMA
                sma_t = sma_inf - 2 * state['step'] * (1 - bias_correction2) / bias_correction2
                update = torch.zeros_like(p.data)
                if sma_t > 4:
                    # Variance rectification term
                    r_t = math.sqrt((sma_t - 4) * (sma_t - 2) * sma_inf / ((sma_inf - 4) * (sma_inf - 2) * sma_t))
                    # Adaptive momentum
                    update.addcdiv_(r_t, exp_avg / bias_correction1,
                                    (exp_avg_sq / bias_correction2).sqrt().add_(group['eps']))
                else:
                    # Unadapted momentum
                    update.add_(exp_avg / bias_correction1)
                # Weight decay
                if group['weight_decay'] != 0:
                    update.add_(group['weight_decay'], p.data)
                # LARS
                # Layer-wise trust ratio: clamp ||p|| and divide by ||update||.
                p_norm = p.data.pow(2).sum().sqrt()
                update_norm = update.pow(2).sum().sqrt()
                phi_p = p_norm.clamp(*self.scale_clip)
                # Compute the local LR
                if phi_p == 0 or update_norm == 0:
                    local_lr = 1
                else:
                    local_lr = phi_p / update_norm
                state['local_lr'] = local_lr
                p.data.add_(-group['lr'] * local_lr, update)
        return loss
| [
"1436496575@qq.com"
] | 1436496575@qq.com |
56c2e6045964d84f02ee669fd2ffa251f599ea78 | 7ed22d30256fe955b070fb0b6838ad2c507f48ba | /bench/bench_binary_vs_http.py | 709bd25a2f7505414e82fe880076ce6cfdbbe8ba | [] | no_license | coleifer/kt | 9c55339053fb7deb595f7d7bc4fb032e927e29c4 | c1d50484ab1c0a80d502cc857265a2d5362f431d | refs/heads/master | 2023-07-07T17:12:34.745629 | 2023-06-23T23:21:10 | 2023-06-23T23:21:10 | 128,153,979 | 20 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,922 | py | #!/usr/bin/env python
"""
Benchmark script to measure time taken to read, write and delete using the
binary protocol and HTTP protocol.
"""
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import contextlib
import time
from kt import *
# Start a throwaway embedded Kyoto Tycoon server and keep a client handle
# that the benchmark helpers below use as a module-level global.
server = EmbeddedServer(quiet=True)
server.run()
db = server.client
def do_set_bulk(nrows, chunksize, klen, vlen):
    """Write nrows zero-padded key/value pairs via the binary protocol."""
    kfmt = '%%0%sd' % klen
    vfmt = '%%0%sd' % vlen
    for start in range(0, nrows, chunksize):
        chunk = {kfmt % n: vfmt % n for n in range(start, start + chunksize)}
        db.set_bulk(chunk)


def do_set_bulk_http(nrows, chunksize, klen, vlen):
    """Write nrows zero-padded key/value pairs via the HTTP protocol."""
    kfmt = '%%0%sd' % klen
    vfmt = '%%0%sd' % vlen
    for start in range(0, nrows, chunksize):
        chunk = {kfmt % n: vfmt % n for n in range(start, start + chunksize)}
        db._http.set_bulk(chunk)


def do_get_bulk(nrows, chunksize, klen, vlen):
    """Read the keys back in chunks via the binary protocol (vlen unused)."""
    kfmt = '%%0%sd' % klen
    for start in range(0, nrows, chunksize):
        keys = [kfmt % n for n in range(start, start + chunksize)]
        db.get_bulk(keys)


def do_get_bulk_http(nrows, chunksize, klen, vlen):
    """Read the keys back in chunks via the HTTP protocol (vlen unused)."""
    kfmt = '%%0%sd' % klen
    for start in range(0, nrows, chunksize):
        keys = [kfmt % n for n in range(start, start + chunksize)]
        db._http.get_bulk(keys)


def do_remove_bulk(nrows, chunksize, klen, vlen):
    """Delete the keys in chunks via the binary protocol (vlen unused)."""
    kfmt = '%%0%sd' % klen
    for start in range(0, nrows, chunksize):
        keys = [kfmt % n for n in range(start, start + chunksize)]
        db.remove_bulk(keys)


def do_remove_bulk_http(nrows, chunksize, klen, vlen):
    """Delete the keys in chunks via the HTTP protocol (vlen unused)."""
    kfmt = '%%0%sd' % klen
    for start in range(0, nrows, chunksize):
        keys = [kfmt % n for n in range(start, start + chunksize)]
        db._http.remove_bulk(keys)
@contextlib.contextmanager
def timed(msg, *params):
    """Context manager that prints `msg(params)` and the elapsed wall time."""
    label = ', '.join(map(str, params))
    begin = time.time()
    yield
    print('%0.3fs - %s(%s)' % (time.time() - begin, msg, label))
SETTINGS = (
    # (nrows, chunksiz, ksiz, vsiz).
    (200000, 10000, 48, 512), # ~100MB of data, 20 batches.
    (25000, 1250, 256, 1024 * 4), # ~100MB of data, 20 batches.
    (1700, 100, 256, 1024 * 64), # ~100MB of data, 17 batches.
)
# For each workload shape, time write/read/delete over the binary
# protocol and then over HTTP, clearing the database between the two.
for nrows, chunksiz, ksiz, vsiz in SETTINGS:
    with timed('set_bulk', nrows, chunksiz, ksiz, vsiz):
        do_set_bulk(nrows, chunksiz, ksiz, vsiz)
    with timed('get_bulk', nrows, chunksiz, ksiz, vsiz):
        do_get_bulk(nrows, chunksiz, ksiz, vsiz)
    with timed('remove_bulk', nrows, chunksiz, ksiz, vsiz):
        do_remove_bulk(nrows, chunksiz, ksiz, vsiz)
    db.clear()
    with timed('set_bulk_http', nrows, chunksiz, ksiz, vsiz):
        do_set_bulk_http(nrows, chunksiz, ksiz, vsiz)
    with timed('get_bulk_http', nrows, chunksiz, ksiz, vsiz):
        do_get_bulk_http(nrows, chunksiz, ksiz, vsiz)
    with timed('remove_bulk_http', nrows, chunksiz, ksiz, vsiz):
        do_remove_bulk_http(nrows, chunksiz, ksiz, vsiz)
    db.clear()
    print('\n')
# Shut down the embedded server; it may already have exited.
try:
    server.stop()
except OSError:
    pass
"coleifer@gmail.com"
] | coleifer@gmail.com |
386b3bc30aca55f030b6bdd3ab16c9f190343c1f | feb1f6eb4a9ba625f82c4460b07cfd0d93995cef | /app/timer/models/utils.py | 0aa5b63ffd456c6687fdec44b64d777007f84d8f | [
"BSD-3-Clause"
] | permissive | nikdoof/limetime | c514f2612baeaf40161a766ab9a10766d9593974 | be15e7bede21dfd4db5ad3fcecc1d385e89a06c7 | refs/heads/master | 2016-09-05T19:56:23.442175 | 2014-04-10T07:32:27 | 2014-04-10T07:32:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,275 | py | import django
from django.db.models.query import QuerySet
from django.db.models.fields.related import OneToOneField
from django.core.exceptions import ObjectDoesNotExist
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError: # Django < 1.5
from django.db.models.sql.constants import LOOKUP_SEP
class InheritanceQuerySet(QuerySet):
    """QuerySet that can return instances of a model's subclasses.

    Appears vendored from django-model-utils: ``select_subclasses()``
    select_related()s the one-to-one child tables, and ``iterator()``
    hands back the most derived instance available for each row.
    """
    def select_subclasses(self, *subclasses):
        """Select the given child relations (all discoverable ones when omitted)."""
        if not subclasses:
            # only recurse one level on Django < 1.6 to avoid triggering
            # https://code.djangoproject.com/ticket/16572
            levels = None
            if django.VERSION < (1, 6, 0):
                levels = 1
            subclasses = self._get_subclasses_recurse(self.model, levels=levels)
        # workaround https://code.djangoproject.com/ticket/16855
        field_dict = self.query.select_related
        new_qs = self.select_related(*subclasses)
        if isinstance(new_qs.query.select_related, dict) and isinstance(field_dict, dict):
            new_qs.query.select_related.update(field_dict)
        new_qs.subclasses = subclasses
        return new_qs
    def _clone(self, klass=None, setup=False, **kwargs):
        # Carry the subclass/annotation bookkeeping over to the clone.
        for name in ['subclasses', '_annotated']:
            if hasattr(self, name):
                kwargs[name] = getattr(self, name)
        return super(InheritanceQuerySet, self)._clone(klass, setup, **kwargs)
    def annotate(self, *args, **kwargs):
        """Track annotation aliases so iterator() can copy them onto child instances."""
        qset = super(InheritanceQuerySet, self).annotate(*args, **kwargs)
        qset._annotated = [a.default_alias for a in args] + list(kwargs.keys())
        return qset
    def iterator(self):
        """Yield the most derived instance found for each row (base instance if none)."""
        iter = super(InheritanceQuerySet, self).iterator()
        if getattr(self, 'subclasses', False):
            for obj in iter:
                sub_obj = None
                for s in self.subclasses:
                    sub_obj = self._get_sub_obj_recurse(obj, s)
                    if sub_obj:
                        break
                if not sub_obj:
                    sub_obj = obj
                if getattr(self, '_annotated', False):
                    # Annotations live on the base row; copy them across.
                    for k in self._annotated:
                        setattr(sub_obj, k, getattr(obj, k))
                yield sub_obj
        else:
            for obj in iter:
                yield obj
    def _get_subclasses_recurse(self, model, levels=None):
        """Collect select_related paths for one-to-one child links, depth-limited by `levels`."""
        rels = [rel for rel in model._meta.get_all_related_objects()
                if isinstance(rel.field, OneToOneField)
                and issubclass(rel.field.model, model)]
        subclasses = []
        if levels:
            levels -= 1
        for rel in rels:
            if levels or levels is None:
                for subclass in self._get_subclasses_recurse(
                        rel.field.model, levels=levels):
                    subclasses.append(rel.var_name + LOOKUP_SEP + subclass)
            subclasses.append(rel.var_name)
        return subclasses
    def _get_sub_obj_recurse(self, obj, s):
        """Follow the LOOKUP_SEP-joined relation path `s`; None if the first hop is missing."""
        rel, _, s = s.partition(LOOKUP_SEP)
        try:
            node = getattr(obj, rel)
        except ObjectDoesNotExist:
            return None
        if s:
            child = self._get_sub_obj_recurse(node, s)
            return child or node
        else:
            return node
"andy@tensixtyone.com"
] | andy@tensixtyone.com |
be5a002e6baf0803a2f64e95e737f83c75e29c9e | c0452806977529ea1e94b8c50cb513a71708cd7d | /prework_app/urls.py | 9a52c4494841b1a4589f687d16b9f94d11e740b3 | [] | no_license | carter3689/django-demo | ff08cfa3d55b101297b31f6bb60b6585cdb1e4dc | 9e548268de311d848fe23062d240f6eb8d64d165 | refs/heads/master | 2021-01-22T10:26:16.545024 | 2017-05-29T08:16:03 | 2017-05-29T08:16:03 | 92,645,100 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | from django.conf.urls import url
from prework_app.views import IndexView,users,form_name_view,UserView
# URL namespace used when reversing, e.g. reverse('prework_app:form_view').
app_name = 'prework_app'
# NOTE(review): all three routes use the identical regex r'^$'.  Django
# resolves patterns in order, so only form_name_view ever serves requests
# to '' — the 'users' and 'user' routes are unreachable by URL and look
# like they need distinct paths.
urlpatterns = [
    url(r'^$',form_name_view, name='form_view'),
    url(r'^$',users,name = 'users'),
    url(r'^$', UserView.as_view(), name='user')
]
| [
"carter3689@gmail.com"
] | carter3689@gmail.com |
ec5c3906a42b94a5c4c10e5b9e6f525b4290f75b | d733778f08c8929bbe6ac5a712aee637ba9f076b | /plot.py | 2babdcc46fac15ddad9bda6dea3340adf0889061 | [] | no_license | dheeraj7596/Coarse2Fine | 33d7f2ae409b64396f5eb0dd9359289dfebc78c2 | 251142b2e704e595e664031f5a469ebad6de8333 | refs/heads/master | 2023-06-24T02:24:35.810100 | 2021-07-18T06:27:06 | 2021-07-18T06:27:06 | 282,985,036 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | import json
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # For every label in the similarity dump, plot a histogram of its
    # similarity values and save it as <label>.png under plots/.
    basepath = "/Users/dheerajmekala/Work/Coarse2Fine/data/"
    # basepath = "/data4/dheeraj/coarse2fine/"
    dataset = "nyt/"
    pkl_dump_dir = basepath + dataset
    plot_dump_dir = pkl_dump_dir + "plots/"
    # 'with' closes the handle promptly instead of leaking it.
    with open(pkl_dump_dir + "all_sims_label_top_words_labels.json", "r") as f:
        all_sims = json.load(f)
    for l in all_sims:
        values = list(all_sims[l].values())
        plt.figure()
        plt.hist(values, color='blue', edgecolor='black', bins=100)
        plt.savefig(plot_dump_dir + l + ".png")
        # Release the figure: matplotlib otherwise keeps every figure alive,
        # growing memory and warning once many labels are plotted.
        plt.close()
| [
"dheeraj7596.dm@gmail.com"
] | dheeraj7596.dm@gmail.com |
27ed2d04fc75705562acaaaa4407c4cd210a2a50 | 9de63f25656bd8984de6765bc96356bcfd8ff0ed | /example/example.py | 104e3978bb04d80c78abbab3f8098eae814a8cb8 | [
"MIT"
] | permissive | asdlei99/easyyaml | 636da05d62ee710b1ef3d9d42d8af4e84cc0269e | 1c73e4f26ffa6d26f8594a58f09c85a48525c3e2 | refs/heads/master | 2023-01-05T06:47:38.534799 | 2020-11-02T11:23:17 | 2020-11-02T11:23:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | import easyyaml as eyaml
def main():
    """End-to-end demo: load the bundled test YAML, drop the last four
    entries of its ``list`` field, rename it, save a copy and print it."""
    data = eyaml.load(eyaml.__test_yaml_file__)
    removed = 0
    while removed < 4:
        data.list.pop()
        removed += 1
    data.name = "this_is_a_simple_example_of_eyaml"
    eyaml.save(eyaml.__temp_yaml_file__, data)
    eyaml.show(data)
# Run the demo only when executed directly as a script.
if __name__ == '__main__':
    main()
| [
"dfzspzq@163.com"
] | dfzspzq@163.com |
7d8b107ffe66bba742fa052e3d32ce83209b8d1a | ed5961c9a3ae027a37913047bd149296955a7497 | /block_zoo/encoder_decoder/SLUDecoder.py | ae7cc6898afa9fc93874c260f6b132fcd48fb0e7 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.1-or-later",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | microsoft/NeuronBlocks | c3de6b1afc431521e70c04ce82b54e7a3292f3aa | 47e03e09589e86d16c609511bf875bd3e3ff3a3e | refs/heads/master | 2023-08-30T08:51:20.228137 | 2022-11-28T19:10:10 | 2022-11-28T19:10:10 | 181,388,576 | 1,308 | 195 | MIT | 2023-07-22T03:07:56 | 2019-04-15T01:01:24 | Python | UTF-8 | Python | false | false | 7,519 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import copy
import numpy as np
from block_zoo.BaseLayer import BaseLayer, BaseConf
#from layers.EncoderDecoder import EncoderDecoderConf
from utils.DocInherit import DocInherit
from utils.corpus_utils import get_seq_mask
class SLUDecoderConf(BaseConf):
    """ Configuration of Spoken Language Understanding Decoder

    References:
        Liu, B., & Lane, I. (2016). Attention-based recurrent neural network models for joint intent detection and slot filling. Proceedings of the Annual Conference of the International Speech Communication Association, INTERSPEECH, (1), 685–689. https://doi.org/10.21437/Interspeech.2016-1352

    Args:
        hidden_dim (int): dimension of hidden state
        dropout (float): dropout rate
        num_layers (int): number of BiLSTM layers
        decoder_emb_dim (int): dimension of the decoder label embeddings
        decoder_vocab_size (int): number of decoder outputs (e.g. slot tags)
    """
    def __init__(self, **kwargs):
        super(SLUDecoderConf, self).__init__(**kwargs)
    @DocInherit
    def default(self):
        # Default hyper-parameters; BaseConf lets kwargs override them.
        self.hidden_dim = 128
        self.dropout = 0.0
        self.num_layers = 1
        self.decoder_emb_dim = 100
        # number of decoder's outputs. E.g., for slot tagging, num_decoder_output means the number of tags;
        # for machine translation, num_decoder_output means the number of words in the target language;
        self.decoder_vocab_size = 10000
        #input_dim and input_context_dim should be inferenced from encoder
    @DocInherit
    def declare(self):
        # Exactly one input of rank 3: [batch, seq_len, dim].
        self.num_of_inputs = 1
        self.input_ranks = [3]
    @DocInherit
    def inference(self):
        # Output keeps the input's batch/seq dims; last dim becomes the vocab size.
        self.output_dim = copy.deepcopy(self.input_dims[0])
        self.output_dim[-1] = self.decoder_vocab_size
        super(SLUDecoderConf, self).inference() # PUT THIS LINE AT THE END OF inference()
    @DocInherit
    def verify(self):
        super(SLUDecoderConf, self).verify()
        # Fail fast when required user-facing or inferred settings are missing.
        necessary_attrs_for_user = ['hidden_dim', 'dropout', 'num_layers', 'decoder_emb_dim', 'decoder_vocab_size']
        for attr in necessary_attrs_for_user:
            self.add_attr_exist_assertion_for_user(attr)
        necessary_attrs_for_dev = ['input_dims', 'input_context_dims']
        for attr in necessary_attrs_for_dev:
            self.add_attr_exist_assertion_for_dev(attr)
class SLUDecoder(BaseLayer):
    """ Spoken Language Understanding Decoder

    Attention-based LSTM decoder that, for every encoder time step, emits a
    log-probability distribution over the decoder vocabulary (e.g. slot tags)
    and greedily feeds its own prediction back as the next input.

    References:
        Liu, B., & Lane, I. (2016). Attention-based recurrent neural network models for joint intent detection and slot filling. Proceedings of the Annual Conference of the International Speech Communication Association, INTERSPEECH, (1), 685–689. https://doi.org/10.21437/Interspeech.2016-1352

    Args:
        layer_conf (SLUDecoderConf): configuration of a layer
    """
    def __init__(self, layer_conf):
        super(SLUDecoder, self).__init__(layer_conf)
        self.layer_conf = layer_conf
        self.embedding = nn.Embedding(layer_conf.decoder_vocab_size, layer_conf.decoder_emb_dim)
        self.embedding.weight.data.uniform_(-0.1, 0.1)  # init in [-0.1, 0.1]
        # Per-step input: [label embedding ; attention context ; aligned encoder output].
        # CAUTION: single direction
        self.lstm = nn.LSTM(layer_conf.decoder_emb_dim + layer_conf.input_dims[0][-1] + layer_conf.input_context_dims[0][-1],
                layer_conf.hidden_dim, layer_conf.num_layers, batch_first=True)
        self.attn = nn.Linear(layer_conf.input_context_dims[0][-1], layer_conf.hidden_dim * layer_conf.num_layers)  # Attention scoring
        self.slot_out = nn.Linear(layer_conf.input_context_dims[0][-1] + layer_conf.hidden_dim * 1 * layer_conf.num_layers, layer_conf.decoder_vocab_size)
    def Attention(self, hidden, encoder_outputs, encoder_maskings):
        """Compute an attention context vector over the encoder outputs.

        Args:
            hidden : 1,B,D decoder hidden state
            encoder_outputs : B,T,D
            encoder_maskings : B,T  # ByteTensor; nonzero marks padding

        Returns:
            context vector with shape B,1,D
        """
        hidden = hidden.view(hidden.size()[1], -1).unsqueeze(2)
        batch_size = encoder_outputs.size(0)  # B
        max_len = encoder_outputs.size(1)  # T
        energies = self.attn(encoder_outputs.contiguous().view(batch_size * max_len, -1))  # B*T,D -> B*T,D
        energies = energies.view(batch_size, max_len, -1)  # B,T,D
        attn_energies = energies.bmm(hidden).transpose(1, 2)  # B,T,D * B,D,1 --> B,1,T
        attn_energies = attn_energies.squeeze(1).masked_fill(encoder_maskings, -1e12)  # PAD masking
        # dim made explicit (implicit-dim softmax is deprecated and warns):
        # normalize over the time axis of the [B, T] scores.
        alpha = F.softmax(attn_energies, dim=1)  # B,T
        alpha = alpha.unsqueeze(1)  # B,1,T
        context = alpha.bmm(encoder_outputs)  # B,1,T * B,T,D => B,1,D
        return context  # B,1,D
    def forward(self, string, string_len, context, encoder_outputs):
        """ process inputs

        Args:
            string (Variable): word ids, [batch_size, seq_len]
            string_len (ndarray): [batch_size]
            context (Variable): [batch_size, 1, input_dim]
            encoder_outputs (Variable): [batch_size, max_seq_len, input_dim]

        Returns:
            Variable : decode scores with shape [batch_size, seq_len, decoder_vocab_size]
        """
        batch_size = string.size(0)
        if torch.cuda.device_count() > 1:
            # otherwise, it will raise a Exception because the length inconsistence
            string_mask = torch.ByteTensor(1 - get_seq_mask(string_len, max_seq_len=string.shape[1]))  # [batch_size, max_seq_len]
        else:
            string_mask = torch.ByteTensor(1 - get_seq_mask(string_len))  # [batch_size, max_seq_len]
        # Note id of "<start>" is 1! decoded is a batch of '<start>' at first
        decoded = torch.LongTensor([[1] * batch_size])
        hidden_init = torch.zeros(self.layer_conf.num_layers * 1, batch_size, self.layer_conf.hidden_dim)
        context_init = torch.zeros(self.layer_conf.num_layers * 1, batch_size, self.layer_conf.hidden_dim)
        if self.is_cuda():
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            string_mask = string_mask.to(device)
            decoded = decoded.to(device)
            hidden_init = hidden_init.to(device)
            context_init = context_init.to(device)
        decoded = decoded.transpose(1, 0)  # [batch_size, 1]
        embedded = self.embedding(decoded)
        hidden = (hidden_init, context_init)
        decode = []
        aligns = encoder_outputs.transpose(0, 1)  # [seq_len, bs, input_dim]
        length = encoder_outputs.size(1)
        for i in range(length):
            aligned = aligns[i].unsqueeze(1)  # [bs, 1, input_dim]
            self.lstm.flatten_parameters()
            _, hidden = self.lstm(torch.cat((embedded, context, aligned), 2), hidden)
            concated = torch.cat((hidden[0].view(1, batch_size, -1), context.transpose(0, 1)), 2)
            score = self.slot_out(concated.squeeze(0))
            # dim made explicit: normalize over the vocabulary axis of the
            # [batch_size, decoder_vocab_size] scores.
            softmaxed = F.log_softmax(score, dim=1)
            decode.append(softmaxed)
            # Greedy decoding: feed the argmax label back as the next input.
            _, decoded = torch.max(softmaxed, 1)
            embedded = self.embedding(decoded.unsqueeze(1))
            context = self.Attention(hidden[0], encoder_outputs, string_mask)
        slot_scores = torch.cat(decode, 1)
        return slot_scores.view(batch_size, length, -1)
| [
"shoulinjun@126.com"
] | shoulinjun@126.com |
d95771d977993f3bd9d4c4741a0cf9b9de3ed8d1 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L67/67-53_wat_20Abox/set_1ns_equi_m.py | db06e4e954d0b1dc8af42c0450c5982ca84ec1a2 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | import os
# Set up and submit a 1 ns equilibration job for every TI lambda window:
# create a clean directory per window, instantiate the input/PBS templates
# (sed replaces the XXX placeholder with the lambda value), stage topology
# and restart files, then queue the job with qsub.
# Renamed 'dir' -> 'base_dir': the original name shadowed the builtin dir().
base_dir = '/mnt/scratch/songlin3/run/mcl1/L67/wat_20Abox/ti_one-step/67_53/'
files_dir = base_dir + 'files/'
temp_equiin = files_dir + 'temp_equi_m.in'
temp_pbs = files_dir + 'temp_1ns_equi_m.pbs'

# Lambda schedule for the TI windows.
lambd = [0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738,
         0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]

for j in lambd:
    # Recreate the window directory from scratch.
    os.system("rm -r %6.5f" % (j))
    os.system("mkdir %6.5f" % (j))
    os.chdir("%6.5f" % (j))
    os.system("rm *")
    workdir = base_dir + "%6.5f" % (j) + '/'
    # Equilibration input: copy the template and substitute lambda for XXX.
    eqin = workdir + "%6.5f_equi_m.in" % (j)
    os.system("cp %s %s" % (temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" % (j, eqin))
    # PBS submission script: same template substitution.
    pbs = workdir + "%6.5f_1ns_equi.pbs" % (j)
    os.system("cp %s %s" % (temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" % (j, pbs))
    # Stage the merged topology and the restart file from the parent dir.
    os.system("cp ../67-53_merged.prmtop .")
    os.system("cp ../0.5_equi_0_3.rst .")
    # Submit, then return to the root for the next window.
    os.system("qsub %s" % (pbs))
    os.chdir(base_dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
a5bc0568e7686a75b26eb37df77e0759c8b474c1 | 97249b5f3b0054ccb7e61b211c525af7e9842f48 | /clickpost/models.py | 68bae3ce86d2f60107467bf25d149d7e2665b78c | [] | no_license | itssonamsinha/testing2 | ebaf88b7c30c8d9bd995e0eac687c8650c3ebc83 | 8800baf8cf3dd5bbfc97959bab0a2c1a674c7587 | refs/heads/master | 2021-03-15T06:12:24.921359 | 2020-03-18T07:42:16 | 2020-03-18T07:42:16 | 246,830,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,518 | py | import abc
import datetime
import json
from django.contrib import admin
from django.db import models
from clickpost.rabbitmq_client import publish_message
class User(models.Model):
    """Contact record holding name, phone, e-mail and verification flags."""
    name = models.CharField(max_length=500, blank=False, null=False)
    # Phone and e-mail may be absent (NULL) until provided.
    phone_number = models.CharField(max_length=10, blank=False, null=True, default=None)
    email = models.EmailField(max_length=100, blank=False, null=True, default=None)
    is_phone_number_verified = models.BooleanField(verbose_name='Phone Number Verified', default=False)
    is_verified = models.BooleanField(verbose_name='Verified', default=False)
    class Meta:
        db_table = "user"
class NotificationAction:
    """Constants naming the supported notification channels."""
    SMS_NOTIFICATION = 1
    WHATSAPP_Notification = 2
    # Choices pairs for model fields, e.g.
    # StatusNotificationTypeRecords.notification_type.
    NOTIFICATION_TYPE_CHOICES = (
        (SMS_NOTIFICATION, "sms_notification"),
        (WHATSAPP_Notification, "whatsapp_notification"),)
class SmsNotification(models.Model):
    """Log row for an SMS notification tied to a shipment status."""
    user = models.ForeignKey(User, on_delete=models.DO_NOTHING, blank=True, null=True)
    shipment_id = models.BigIntegerField()
    sent_at = models.DateTimeField(blank=True, null=True, default=None)
    status = models.CharField(max_length=100, blank=False, null=False)
    class Meta:
        db_table = "sms_notification"
    def send(self, user, shipment_id, status, *args, **kwargs):
        """Record the notification row and publish an 'sms' message to the queue.

        NOTE(review): sms_text is never assigned before publishing, so the
        payload carries data=None — the SMS body still needs to be composed.
        """
        sms_noti = SmsNotification.objects.create(user=user, shipment_id=shipment_id, status=status, sent_at=datetime.datetime.now())
        sms_text = None
        # create sms text to be send
        message = {
            "data": sms_text,
            "type": "sms"
        }
        # the message to be sent
        publish_message(json.dumps(message))
class WhtsappNotification(models.Model):
    """Log row for a WhatsApp (HSM template) notification about a shipment."""
    user = models.ForeignKey(User, on_delete=models.DO_NOTHING, blank=True, null=True)
    shipment_id = models.BigIntegerField()
    sent_at = models.DateTimeField(blank=True, null=True, default=None)
    status = models.CharField(max_length=100, blank=False, null=False)
    template_name = models.CharField(max_length=100, null=False, blank=False)
    def send(self, user, shipment_id, status, *args, **kwargs):
        """Record the notification row and publish a 'whatsapp' payload.

        NOTE(review): the local template_name and message are both left as
        None before the payload is built, so the published template has no
        name or params — they still need to be filled in here.
        """
        # template_name is the name of the template approved
        template_name = None
        whatsapp_noti = WhtsappNotification.objects.create(user=user, shipment_id=shipment_id, status=status,
                                                           sent_at=datetime.datetime.now(), template_name=template_name)
        message = None #the message to be sent
        # create message over here
        whatsapp_message = {"media": {},
                            "message": "",
                            "template": {
                                "name": template_name,
                                "params": message
                            },
                            "message_type": "HSM"
                            }
        whatsapp_payload = {
            "data": whatsapp_message,
            "type": "whatsapp"
        }
        publish_message(json.dumps(whatsapp_payload))
    class Meta:
        db_table = "whtsapp_notification"
class StatusNotificationTypeRecords(models.Model):
    """Per-user record of a status, the chosen notification channel, and
    whether the notification has been sent."""
    user = models.ForeignKey(User, on_delete=models.DO_NOTHING, blank=True, null=True)
    status = models.TextField()
    notification_type = models.PositiveIntegerField(choices=NotificationAction.NOTIFICATION_TYPE_CHOICES)
    is_sent = models.BooleanField(default=False)
    class Meta:
        db_table = "status_notification_type_records"
admin.site.register(User) | [
"sonamsinha@policybazaar.com"
] | sonamsinha@policybazaar.com |
0ad139fdf5102479fa5fdc4113f6187c844323da | 0d61f90e3a7877e91d72fed71b0895c7070dc046 | /final_project/.history/project/menu_app/views_20201231151708.py | 7f07a804f6036b814055d3102e0b627ccb9fec5b | [] | no_license | lienusrob/final_project | 44d7d90dc0b7efc0cf55501549a5af0110d09b3b | 4164769626813f044ec2af3e7842514b5699ef77 | refs/heads/master | 2023-02-10T16:36:33.439215 | 2021-01-05T09:34:01 | 2021-01-05T09:34:01 | 325,002,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py |
from .models import MenuItem, ItemsCategory, Order, generate_order_id
from account_app.models import Profile
from django.views.generic import ListView
from django.shortcuts import render, get_object_or_404
class MenuListView(ListView):
    """Generic class-based listing of all MenuItem rows
    (template: items/menu_list.html)."""
    model = MenuItem
    template_name = 'items/menu_list.html'
def menu_list_view(request):
    """Render the full menu plus the category lists used by the page body
    and the side navigation."""
    menu_items = MenuItem.objects.all()
    context = {
        'item_list': menu_items,
        # Each template variable gets its own reversed iterator, since a
        # reversed object can only be consumed once.
        'item_categories': reversed(ItemsCategory.objects.all()),
        'item_categories_side_nav': reversed(ItemsCategory.objects.all()),
    }
    return render(request, 'menu_app/menu_list.html', context)
def menu_drop(request):
    # NOTE(review): this view fetches the categories but never uses them and
    # never returns an HttpResponse — Django raises ValueError when a view
    # returns None, so this implementation appears unfinished.
    category_menu = ItemsCategory.objects.all( )
def menu_item_detail(request, **kwargs):
    """Render the detail page for one menu item; 'item' is None when the
    pk is absent or unknown."""
    pk = kwargs.get('pk')
    menu_item = MenuItem.objects.filter(id=pk).first()
    return render(request, 'menu_app/item_details.html', {'item': menu_item})
def new_order_info(request):
user_profile = get_object_or_404(Profile, user=request.user)
order, created = Order.objects.get_or_create(customer=user_profile.user, is_ordered=False)
if created:
order.ref_code = generate_order_id()
order.save()
context = {'order':order}
return render(request, 'items/order_info.html', context) | [
"lienus.rob@hotmail.de"
] | lienus.rob@hotmail.de |
2aa563507ebf8be9aecab0a02d27c354241c9752 | 511bec356e4126cc4d102f770d80ba62571b1402 | /test-integration/speed_test.py | e0c3f00788293a38b59f80f9558fe94901684af0 | [
"MIT"
] | permissive | GrantMcConachie/NP-Classifier | 09999dc2d2a24ebe40b320e6c43269c311865d02 | 7e4e2001a1416b96968240650dc7d70f1275fdb5 | refs/heads/master | 2023-08-20T23:26:54.176839 | 2021-10-31T21:41:11 | 2021-10-31T21:41:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | import pandas as pd
import urllib.parse
import requests
from tqdm import tqdm
import grequests
import time
#SERVER_URL = "http://localhost:6541"
SERVER_URL = "http://dorresteintesthub.ucsd.edu:6541"
#SERVER_URL = "http://npclassifier.ucsd.edu:6541"
def test_speed():
    """Build the /classify URL for every SMILES in test.tsv, repeated 100
    times, fire them concurrently (20 in flight) and report the wall time.

    Rows whose SMILES string is 5 characters or shorter are skipped.
    """
    df = pd.read_csv("test.tsv", sep=",")
    # Hoisted: df.to_dict() is loop-invariant and was recomputed on every
    # one of the 100 iterations in the original.
    records = df.to_dict(orient="records")
    iterations = 100
    all_urls = []
    for i in range(iterations):
        for entry in records:
            smiles = str(entry["smiles"])
            if len(smiles) > 5:
                request_url = "{}/classify?smiles={}".format(SERVER_URL, urllib.parse.quote(smiles))
                all_urls.append(request_url)
    # Lets actually do the query and measure the speed
    rs = (grequests.get(u) for u in all_urls)
    start_time = time.time()
    # Return value was unused; map() is called purely for its side effects.
    grequests.map(rs, size=20)
    print("--- {} seconds for {} Requests---".format(time.time() - start_time, len(all_urls)))
| [
"mwang87@gmail.com"
] | mwang87@gmail.com |
119faacc38c84726924f3d8fa5ac47869128fd91 | 0cf7f887cc778b3cce1c6e60aceafccac41664f2 | /Skyend_server/skyend_configs/views/UnidadMedidaView.py | 7efd54e2595e3575e346f1979d7f7bd9ad0ee34d | [
"Apache-2.0"
] | permissive | upeuapps/skyend | 08398110ac0e65f14945d41796d9921d5185916b | 9f5d9752d4965a58ceefbdcb4eace224d7246832 | refs/heads/master | 2021-01-10T02:06:27.719998 | 2015-12-21T20:37:44 | 2015-12-21T20:37:44 | 48,392,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | from django.shortcuts import render
from django.conf.urls import url, include
from django.contrib.auth.models import User
from rest_framework import routers, serializers, viewsets
from rest_framework import permissions
from ..models.UnidadMedida import UnidadMedida
from ..utils import SetPagination
from django.db.models import Q
from operator import __or__ as OR
from functools import reduce
from rest_framework.response import Response
from .InsumoView import InsumoSerializer
class UnidadMedidaSerializer(serializers.ModelSerializer):
    """Serializer for UnidadMedida exposing its related insumo_set read-only."""
    insumo_set = InsumoSerializer(many=True, read_only=True)
    class Meta:
        model = UnidadMedida
        # fields = ('url', 'abrev', 'descr')
class UnidadMedidaViewSet(viewsets.ModelViewSet):  # API REST
    """Paginated CRUD endpoint for UnidadMedida with ?query= search."""
    queryset = UnidadMedida.objects.filter()
    serializer_class = UnidadMedidaSerializer
    pagination_class = SetPagination
    # paginate_by = 3
    # permission_classes = [permissions.IsAuthenticated]
    def get_queryset(self):
        """Limit results to units whose code or name contains the term."""
        search_term = self.request.query_params.get('query', '')
        code_or_name = Q(codigo__icontains=search_term) | Q(nombre__icontains=search_term)
        return self.queryset.filter(code_or_name)
| [
"asullom@gmail.com"
] | asullom@gmail.com |
f1047d2ce99a22b134545d90c1ddc3b6d600592e | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5695413893988352_0/Python/progiv/close_match.py | de2bc3d31272ead8036a2f9ebf22e4067d456aba | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | import math
import sys
# The binary-search style helpers below recurse deeply; raise the limit.
sys.setrecursionlimit(1000000)
# Module-level input/output file handles shared by all helpers.
inf = open("in.in", "r")
ouf = open('out.out','w')
def close_files():
    # Close the module-level input/output handles when the run completes.
    inf.close()
    ouf.close()
def precount():
    # Placeholder for precomputation; intentionally empty (call is commented
    # out in the driver below).
    pass
# Running case counter used by printstr() for the 'Case #N' numbering.
printcounter = 0
def printstr(a):
    # Emit one answer line to the output file in the judge's
    # 'Case #N: <answer>' format.
    global printcounter
    printcounter +=1
    print ('Case #%d: %s' % (printcounter,a), file=ouf)
# NOTE(review): 'full' is never referenced anywhere in this file.
full = set(range(10))
def check(s1, s2):
    """Return True when s1 is consistent with pattern s2, where '?' in s2
    matches any character; positions beyond the shorter string are ignored."""
    return all(c2 == '?' or c1 == c2 for c1, c2 in zip(s1, s2))
def addlead(s, n):
    """Left-pad the string form of s with zeros out to width n."""
    return str(s).rjust(n, '0')
def solvetest():
    # One test case: C and J are equal-length digit strings where '?' marks
    # an unknown digit.  Brute-force every completion of both numbers and
    # keep the pair minimising |C - J|; ties resolve to the smallest C, then
    # the smallest J, because only strictly smaller differences replace the
    # incumbent.
    C,J = inf.readline().split()
    l = len(C)
    diff=10**18
    c1 = 0
    c2 = 0
    # O(10^l * 10^l) double loop — viable only for the small l of this task.
    for i in range(10**l):
        if check(addlead(i,l),C):
            for j in range(10**l):
                if abs(i-j)<diff and check(addlead(j,l),J):
                    diff = abs(i-j)
                    c1 = i
                    c2 = j
    printstr(' '.join([addlead(c1,l), addlead(c2,l)]))
#precount()
# Driver: the first input line holds the number of test cases.
testnum = int(inf.readline())
for test in range(testnum):
    solvetest()
close_files()
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
330d6c5a2a9e117515c7454e5f80eff8b300427c | 7db93c328243cd2f6ffcabb66b0d148bb0e3d198 | /Offer_book_problems/053getNumOfKInSortedArr.py | 840163fc03f018d1411af66a15b01ccf386a7d8c | [
"MIT"
] | permissive | zhaoxinlu/leetcode-algorithms | 62cc67efdc1b0e8514c83bb7643b369b4f681948 | f5e1c94c99628e7fb04ba158f686a55a8093e933 | refs/heads/master | 2021-05-11T23:47:43.385660 | 2018-04-25T08:27:57 | 2018-04-25T08:27:57 | 117,520,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | # -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-02-26
Function: 数字在排序数组中出现的次数
借助二分查找的方法,时间复杂度:O(logn)
"""
def getFirstK(arr, value, start, end):
    """Binary-search the index of the FIRST occurrence of ``value`` in the
    sorted slice ``arr[start..end]``; return -1 when absent.  O(log n).
    """
    if start > end:
        return -1
    # '//' keeps the midpoint an int on both Python 2 and 3; the original
    # '/' yields a float index under Python 3 and crashes the list access.
    midIndex = (start + end) // 2
    midValue = arr[midIndex]
    if midValue == value:
        # First occurrence iff at index 0 or the previous element differs.
        if (midIndex > 0 and arr[midIndex-1] != value) or midIndex == 0:
            return midIndex
        else:
            end = midIndex - 1
    elif midValue > value:
        end = midIndex - 1
    else:
        start = midIndex + 1
    return getFirstK(arr, value, start, end)
def getLastK(arr, value, start, end):
    """Binary-search the index of the LAST occurrence of ``value`` in the
    sorted slice ``arr[start..end]``; return -1 when absent.  O(log n).
    """
    if start > end:
        return -1
    # '//' keeps the midpoint an int on both Python 2 and 3; the original
    # '/' yields a float index under Python 3 and crashes the list access.
    midIndex = (start + end) // 2
    midValue = arr[midIndex]
    if midValue == value:
        # Last occurrence iff at the final index or the next element differs.
        if (midIndex < (len(arr)-1) and arr[midIndex + 1] != value) or midIndex == (len(arr)-1):
            return midIndex
        else:
            start = midIndex + 1
    elif midValue > value:
        end = midIndex - 1
    else:
        start = midIndex + 1
    return getLastK(arr, value, start, end)
def getNumOfKInSortedArr(arr, value):
    """Count occurrences of ``value`` in sorted ``arr`` in O(log n) using
    two binary searches for the first and last positions."""
    result = 0
    # 'is not None' identity test instead of the '!= None' comparison.
    if arr is not None and len(arr) > 0:
        first = getFirstK(arr, value, 0, len(arr)-1)
        last = getLastK(arr, value, 0, len(arr)-1)
        if first > -1 and last > -1:
            result = last - first + 1
    return result
if __name__ == '__main__':
print getNumOfKInSortedArr([1, 2, 3, 3, 3, 3, 4, 5], 3) | [
"446571703@qq.com"
] | 446571703@qq.com |
3d5f439474ba5c97c1cc80cfdd6b3beb7aef476d | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/81984_Class_Adoption/recipe-81984.py | 79495516d3d13abcd312f18b3e07647b058c417a | [
"Python-2.0",
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 837 | py | def adopt_class(klass, obj, *args, **kwds):
'reclass obj to inherit klass; call __init__ with *args, **kwds'
classname = '%s_%s' % (klass.__name__, obj.__class__.__name__)
obj.__class__ = new.classobj(classname, (klass, obj.__class__), {})
klass.__init__(obj, *args, **kwds)
def demo():
class Sandwich:
def __init__(self, ingredients):
self.ingredients = ingredients
def __repr__(self):
return reduce((lambda a,b: a+' and '+b), self.ingredients)
class WithSpam:
def __init__(self, spam_count):
self.spam_count = spam_count
def __repr__(self):
return Sandwich.__repr__(self) + self.spam_count * ' and spam'
pbs = Sandwich(['peanut butter', 'jelly'])
adopt_class(WithSpam, pbs, 2)
print pbs
| [
"betty@qburst.com"
] | betty@qburst.com |
b79b85808478527832c5ffe03ceecd59e2b003f1 | 65c0ef56c2e2c3e1646a610f49e6dd06f2c6102d | /src/libs/core/register/base/base.py | 51a4b1a8094af5328e72bb744227c24ebe26e60c | [
"MIT"
] | permissive | VirtualVFix/AndroidTestFramework | d3411f328a793ee7b007c4736983204aae81b739 | 1feb769c6aca39a78e6daefd6face0a1e4d62cd4 | refs/heads/master | 2020-08-11T14:48:12.454415 | 2019-10-12T10:20:43 | 2019-10-12T10:20:43 | 214,582,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,972 | py | # All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code bellow!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "28/09/17 12:06"
import inspect
import hashlib
from libs.core.logger import getSysLogger
from .exceptions import ConfigAccessError, ConfigError
class Base:
"""
Base config class.
Logging all changes in attributes.
"""
syslogger = getSysLogger()
def __init__(self):
pass
def __setattr__(self, name, value):
"""
Set config attribute and check if attribute is not locked.
Also logging config changes.
"""
frame = inspect.currentframe()
self.syslogger.info('SET [%s.%s = %s]' % (frame.f_locals['self'].__class__.__name__, name, value))
# check attribute locked
if hasattr(self, ('_%s__lock__%s' % (self.__class__.__name__, name)).lower()):
raise ConfigAccessError('[%s.%s] variable is locked and cannot be changed !'
% (self.__class__.__name__, name))
super(Base, self).__setattr__(name, value)
def __getattribute__ (self, name):
"""
Get config attribute and check if attribute is not locked.
"""
if not name.startswith('_'):
return self.__check_lock(name)
return super(Base, self).__getattribute__(name)
def __delattr__(self, name):
frame = inspect.currentframe()
self.syslogger.info('DEL [%s.%s]' % (frame.f_locals['self'].__class__.__name__, name))
super(Base, self).__delattr__(name)
def LOCK(self, name):
"""
Lock variable to changes.
Exceptions:
ConfigError if variable to lock not found
Args:
name (str): Variable name to lock
"""
if not hasattr(self, name):
raise ConfigError('[%s] variable to lock not found in [%s] config !' % (name, self.__class__.__name__))
_lock = ('_%s__lock__%s' % (self.__class__.__name__, name)).lower()
setattr(self, _lock, hashlib.md5(str(getattr(self, name)).encode('utf-8')).hexdigest())
def __check_lock(self, name):
"""
Check if variable locked.
Exceptions:
ConfigAccessError if attempt changes to locked variable
Args:
name (str): Variable name to check
"""
# check if variable locked
_lock = ('_%s__lock__%s' % (self.__class__.__name__, name)).lower()
if hasattr(self, _lock):
_lock = getattr(self, _lock)
if _lock != hashlib.md5(str(super(Base, self).__getattribute__(name)).encode('utf-8')).hexdigest():
raise ConfigAccessError('[%s.%s] variable is locked and cannot be changed !'
% (self.__class__.__name__, name))
return super(Base, self).__getattribute__(name)
| [
"github.com/virtualvfix"
] | github.com/virtualvfix |
0c6259601f18d2e0b4e09fd2371369bbd6751519 | bfae005cb0471c2656be26f6b4721d5edcf30bf4 | /tests/test_paths.py | 8270303aa4fce98f93a6bcd3bd72e9ce6e4d8084 | [
"MIT"
] | permissive | jdmcclain47/opt_einsum | 740d84e4fcfc275bc39af319be2e81114a722075 | 6884abf482e3759a45af5ca5c9361429b646e38c | refs/heads/master | 2021-05-09T10:21:16.322977 | 2017-12-07T11:55:48 | 2017-12-07T11:55:48 | 118,960,706 | 1 | 0 | null | 2018-01-25T19:54:57 | 2018-01-25T19:54:57 | null | UTF-8 | Python | false | false | 5,324 | py | """
Tests the accuracy of the opt_einsum paths in addition to unit tests for
the various path helper functions.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import opt_einsum as oe
import pytest
explicit_path_tests = {
'GEMM1': ([set('abd'), set('ac'), set('bdc')], set(''), {
'a': 1,
'b': 2,
'c': 3,
'd': 4
}),
'Inner1': ([set('abcd'), set('abc'), set('bc')], set(''), {
'a': 5,
'b': 2,
'c': 3,
'd': 4
}),
}
path_edge_tests = [
['greedy', 'eb,cb,fb->cef', ((0, 2), (0, 1))],
['optimal', 'eb,cb,fb->cef', ((0, 2), (0, 1))],
['greedy', 'dd,fb,be,cdb->cef', ((0, 3), (0, 1), (0, 1))],
['optimal', 'dd,fb,be,cdb->cef', ((0, 3), (0, 1), (0, 1))],
['greedy', 'bca,cdb,dbf,afc->', ((1, 2), (0, 2), (0, 1))],
['optimal', 'bca,cdb,dbf,afc->', ((1, 2), (0, 2), (0, 1))],
['greedy', 'dcc,fce,ea,dbf->ab', ((0, 3), (0, 2), (0, 1))],
['optimal', 'dcc,fce,ea,dbf->ab', ((1, 2), (0, 2), (0, 1))],
]
def check_path(test_output, benchmark, bypass=False):
if not isinstance(test_output, list):
return False
if len(test_output) != len(benchmark):
return False
ret = True
for pos in range(len(test_output)):
ret &= isinstance(test_output[pos], tuple)
ret &= test_output[pos] == benchmark[pos]
return ret
def assert_contract_order(func, test_data, max_size, benchmark):
test_output = func(test_data[0], test_data[1], test_data[2], max_size)
assert check_path(test_output, benchmark)
def test_size_by_dict():
sizes_dict = {}
for ind, val in zip('abcdez', [2, 5, 9, 11, 13, 0]):
sizes_dict[ind] = val
path_func = oe.helpers.compute_size_by_dict
assert 1 == path_func('', sizes_dict)
assert 2 == path_func('a', sizes_dict)
assert 5 == path_func('b', sizes_dict)
assert 0 == path_func('z', sizes_dict)
assert 0 == path_func('az', sizes_dict)
assert 0 == path_func('zbc', sizes_dict)
assert 104 == path_func('aaae', sizes_dict)
assert 12870 == path_func('abcde', sizes_dict)
def test_flop_cost():
size_dict = {v: 10 for v in "abcdef"}
# Loop over an array
assert 10 == oe.helpers.flop_count("a", False, 1, size_dict)
# Hadamard product (*)
assert 10 == oe.helpers.flop_count("a", False, 2, size_dict)
assert 100 == oe.helpers.flop_count("ab", False, 2, size_dict)
# Inner product (+, *)
assert 20 == oe.helpers.flop_count("a", True, 2, size_dict)
assert 200 == oe.helpers.flop_count("ab", True, 2, size_dict)
# Inner product x3 (+, *, *)
assert 30 == oe.helpers.flop_count("a", True, 3, size_dict)
# GEMM
assert 2000 == oe.helpers.flop_count("abc", True, 2, size_dict)
def test_path_optimal():
test_func = oe.paths.optimal
test_data = explicit_path_tests['GEMM1']
assert_contract_order(test_func, test_data, 5000, [(0, 2), (0, 1)])
assert_contract_order(test_func, test_data, 0, [(0, 1, 2)])
def test_path_greedy():
test_func = oe.paths.greedy
test_data = explicit_path_tests['GEMM1']
assert_contract_order(test_func, test_data, 5000, [(0, 2), (0, 1)])
assert_contract_order(test_func, test_data, 0, [(0, 1, 2)])
def test_memory_paths():
expression = "abc,bdef,fghj,cem,mhk,ljk->adgl"
views = oe.helpers.build_views(expression)
# Test tiny memory limit
path_ret = oe.contract_path(expression, *views, path="optimal", memory_limit=5)
assert check_path(path_ret[0], [(0, 1, 2, 3, 4, 5)])
path_ret = oe.contract_path(expression, *views, path="greedy", memory_limit=5)
assert check_path(path_ret[0], [(0, 1, 2, 3, 4, 5)])
# Check the possibilities, greedy is capped
path_ret = oe.contract_path(expression, *views, path="optimal", memory_limit=-1)
assert check_path(path_ret[0], [(0, 3), (0, 4), (0, 2), (0, 2), (0, 1)])
path_ret = oe.contract_path(expression, *views, path="greedy", memory_limit=-1)
assert check_path(path_ret[0], [(2, 4), (3, 4), (2, 3), (1, 2), (0, 1)])
@pytest.mark.parametrize("alg,expression,order", path_edge_tests)
def test_path_edge_cases(alg, expression, order):
views = oe.helpers.build_views(expression)
# Test tiny memory limit
path_ret = oe.contract_path(expression, *views, path=alg)
assert check_path(path_ret[0], order)
def test_optimal_edge_cases():
# Edge test5
expression = 'a,ac,ab,ad,cd,bd,bc->'
edge_test4 = oe.helpers.build_views(expression, dimension_dict={"a": 20, "b": 20, "c": 20, "d": 20})
path, path_str = oe.contract_path(expression, *edge_test4, path='greedy')
assert check_path(path, [(0, 1), (0, 1, 2, 3, 4, 5)])
path, path_str = oe.contract_path(expression, *edge_test4, path='optimal')
assert check_path(path, [(0, 1), (0, 1, 2, 3, 4, 5)])
def test_greedy_edge_cases():
expression = "abc,cfd,dbe,efa"
dim_dict = {k: 20 for k in expression.replace(",", "")}
tensors = oe.helpers.build_views(expression, dimension_dict=dim_dict)
path, path_str = oe.contract_path(expression, *tensors, path='greedy')
assert check_path(path, [(0, 1, 2, 3)])
path, path_str = oe.contract_path(expression, *tensors, path='greedy', memory_limit=-1)
assert check_path(path, [(0, 1), (0, 2), (0, 1)])
| [
"malorian@me.com"
] | malorian@me.com |
98c39e2626dbe0c6783925c0e26ecacb2c415b1d | 597b535310930f158d4b1c3ab116ff5cb99082a6 | /clean_arch/cleana/usecase/auth/auth_interactor.py | 9ebcccba5b3caef32f9d03e51375e6460b85cee0 | [] | no_license | ganqzz/flask_demo | aa04584e82df41eeac8346d02c0ff313d3f81942 | 4c88bb3707e0fd47a5f9a096c4c8973f6e00aa99 | refs/heads/master | 2023-07-17T15:35:05.694467 | 2021-09-04T08:07:16 | 2021-09-04T08:07:16 | 356,182,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | from cleana.repository.user.user_repository import UserRepository
from cleana.usecase.auth.auth_usecase import AuthUseCase
class AuthInteractor(AuthUseCase):
def __init__(self, user_repository: UserRepository):
self.user_repository = user_repository
def login(self, body: str):
pass
def logout(self):
pass
def sign_up(self, body: str):
pass
def update(self, body: str):
pass
| [
"ganqzz@users.noreply.github.com"
] | ganqzz@users.noreply.github.com |
6bc1860a6122ae953a92f8911faf9f6d47231d7c | dcaa383b42ee9b2cfb7a61b791877f6d7aa49f52 | /semantic_filtering/models/unet.py | 8ce56aefef1aed7517bb12f329891298500acef4 | [] | no_license | ver228/semantic_filtering | 910ed8b1381ea65b5c5bf4ef4c09270ab803e5ec | 606be573393fb5ff1dd22169182265ed18cc54aa | refs/heads/master | 2020-04-26T18:46:25.195929 | 2019-10-09T12:54:39 | 2019-10-09T12:54:39 | 173,754,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,670 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 19 11:13:46 2018
@author: avelinojaver
"""
import math
import torch
from torch import nn
import torch.nn.functional as F
def weights_init_xavier(m):
'''
Taken from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
'''
classname = m.__class__.__name__
# print(classname)
if classname.startswith('Conv'):
nn.init.xavier_normal_(m.weight.data, gain=1)
elif classname.startswith('Linear'):
nn.init.xavier_normal_(m.weight.data, gain=1)
elif classname.startswith('BatchNorm2d'):
nn.init.uniform_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0.0)
def _crop(x, x_to_crop):
c = (x_to_crop.size()[2] - x.size()[2])/2
c1, c2 = math.ceil(c), math.floor(c)
c = (x_to_crop.size()[3] - x.size()[3])/2
c3, c4 = math.ceil(c), math.floor(c)
cropped = F.pad(x_to_crop, (-c3, -c4, -c1, -c2)) #negative padding is the same as cropping
return cropped
def _conv3x3(n_in, n_out):
return [nn.Conv2d(n_in, n_out, 3, padding=1),
nn.LeakyReLU(negative_slope=0.1, inplace=True)]
class Down(nn.Module):
def __init__(self, n_in, n_out):
super().__init__()
_layers = _conv3x3(n_in, n_out) + [nn.MaxPool2d(2)]
self.conv_pooled = nn.Sequential(*_layers)
def forward(self, x):
x = self.conv_pooled(x)
return x
class Up(nn.Module):
def __init__(self, n_filters):
super().__init__()
self.up = nn.Upsample(scale_factor=2, mode='nearest')
#self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
_layers = []
for ii in range(len(n_filters) - 1):
n_in, n_out = n_filters[ii], n_filters[ii+1]
_layers += _conv3x3(n_in, n_out)
self.conv = nn.Sequential(*_layers)
def forward(self, x1, x2):
x1 = self.up(x1)
x2 = _crop(x1, x2)
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class UNet(nn.Module):
def __init__(self, n_channels = 1, n_classes = 1):
super().__init__()
self.conv0 = nn.Sequential(*_conv3x3(n_channels, 48))
self.down1 = Down(48, 48)
self.down2 = Down(48, 48)
self.down3 = Down(48, 48)
self.down4 = Down(48, 48)
self.down5 = Down(48, 48)
self.conv6 = nn.Sequential(*_conv3x3(48, 48))
self.up5 = Up([96, 96, 96])
self.up4 = Up([144, 96, 96])
self.up3 = Up([144, 96, 96])
self.up2 = Up([144, 96, 96])
self.up1 = Up([96 + n_channels, 64, 32])
self.conv_out = nn.Sequential(nn.Conv2d(32, n_classes, 3, padding=1))
for m in self.modules():
weights_init_xavier(m)
def _unet(self, x_input):
x0 = self.conv0(x_input)
x1 = self.down1(x0)
x2 = self.down2(x1)
x3 = self.down3(x2)
x4 = self.down4(x3)
x5 = self.down5(x4)
x6 = self.conv6(x5)
x = self.up5(x6, x4)
x = self.up4(x, x3)
x = self.up3(x, x2)
x = self.up2(x, x1)
x = self.up1(x, x_input)
x = self.conv_out(x)
return x
def forward(self, x_input):
# the input shape must be divisible by 32 otherwise it will be cropped due
#to the way the upsampling in the network is done. Therefore it is better to path
#the image and recrop it to the original size
nn = 2**5
ss = [math.ceil(x/nn)*nn - x for x in x_input.shape[2:]]
pad_ = [(int(math.floor(x/2)),int(math.ceil(x/2))) for x in ss]
#use pytorch for the padding
pad_ = [x for d in pad_[::-1] for x in d]
pad_inv_ = [-x for x in pad_]
x_input = F.pad(x_input, pad_, 'reflect')
x = self._unet(x_input)
x = F.pad(x, pad_inv_)
return x
# def forward(self, x_input):
# x0 = self.conv0(x_input)
#
# x1 = self.down1(x0)
# x2 = self.down2(x1)
# x3 = self.down3(x2)
# x4 = self.down4(x3)
# x5 = self.down5(x4)
#
# x6 = self.conv6(x5)
#
# x = self.up5(x6, x4)
# x = self.up4(x, x3)
# x = self.up3(x, x2)
# x = self.up2(x, x1)
# x = self.up1(x, x_input)
#
# x = self.conv_out(x)
# return x
if __name__ == '__main__':
mod = UNet()
X = torch.rand((1, 1, 540, 600))
out = mod(X)
print(out.size())
| [
"ver228@gmail.com"
] | ver228@gmail.com |
8ef5b41058ae63d10c0a294e30136ad3f639ceb1 | 3dfb4ee39555b30e6e0c6fcdbef371864e69f694 | /google-cloud-sdk/.install/.backup/lib/surface/pubsub/topics/list_subscriptions.py | 177c57782bcb2c2897a5806b9cb1e3edf8114ed4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | MD-Anderson-Bioinformatics/NG-CHM_Galaxy | 41d1566d5e60416e13e023182ca4351304381a51 | dcf4886d4ec06b13282143ef795c5f0ff20ffee3 | refs/heads/master | 2021-06-02T21:04:12.194964 | 2021-04-29T14:45:32 | 2021-04-29T14:45:32 | 130,249,632 | 0 | 1 | null | 2020-07-24T18:35:21 | 2018-04-19T17:25:33 | Python | UTF-8 | Python | false | false | 3,886 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Pub/Sub topics list_subscriptions command."""
import re
from googlecloudsdk.api_lib.pubsub import util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as sdk_ex
from googlecloudsdk.core.console import console_io as io
class ListSubscriptions(base.Command):
"""Lists Cloud Pub/Sub subscriptions from a given topic.
Lists all of the Cloud Pub/Sub subscriptions attached to the given topic and
that match the given filter.
"""
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'topic',
help=('The name of the topic to list subscriptions for.'))
parser.add_argument(
'--name-filter', '-f', default='',
help=('A regular expression that will limit which subscriptions are'
' returned by matching on subscription name.'))
parser.add_argument(
'--max-results', type=int, default=0,
help=('The maximum number of subscriptions that this'
' command may return.'
'This option is ignored if --name-filter is set.'))
@util.MapHttpError
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Yields:
Subscriptions paths that match the regular expression in args.name_filter.
"""
msgs = self.context['pubsub_msgs']
pubsub = self.context['pubsub']
page_token = None
subscriptions_listed = 0
should_truncate_res = args.max_results and not args.name_filter
try:
while True:
list_subscriptions_req = (
msgs.PubsubProjectsTopicsSubscriptionsListRequest(
topic=util.TopicFormat(args.topic),
pageToken=page_token))
if should_truncate_res:
list_subscriptions_req.pageSize = min(args.max_results,
util.MAX_LIST_RESULTS)
list_subscriptions_result = pubsub.projects_topics_subscriptions.List(
list_subscriptions_req)
for subscription in list_subscriptions_result.subscriptions:
if not util.SubscriptionMatches(subscription, args.name_filter):
continue
# If max_results > 0 and we have already sent that
# amount of subscriptions, just raise (StopIteration) iff name_filter
# is not set, else this limit wouldn't make sense.
if should_truncate_res and subscriptions_listed >= args.max_results:
raise StopIteration()
subscriptions_listed += 1
yield subscription
page_token = list_subscriptions_result.nextPageToken
if not page_token:
break
except re.error as e:
raise sdk_ex.HttpException(str(e))
def Display(self, args, result):
"""This method is called to print the result of the Run() method.
Args:
args: The arguments that command was run with.
result: The value returned from the Run() method.
"""
subscriptions = [subscription for subscription in result]
printer = io.ListPrinter(
'{0} subscriptions(s) found'.format(len(subscriptions)))
printer.Print(subscriptions)
| [
"rbrown@insilico.us.com"
] | rbrown@insilico.us.com |
05f53b4af1881240285b8286fc6e4b690262b1b4 | bc441bb06b8948288f110af63feda4e798f30225 | /capacity_admin_sdk/model/topology/property_pb2.py | 70e3c45e1c38d72b24f9b0d928498f2c52426ea9 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,203 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: property.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from capacity_admin_sdk.model.topology import strategy_pb2 as capacity__admin__sdk_dot_model_dot_topology_dot_strategy__pb2
from capacity_admin_sdk.model.topology import cmdb_instance_pb2 as capacity__admin__sdk_dot_model_dot_topology_dot_cmdb__instance__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='property.proto',
package='topology',
syntax='proto3',
serialized_options=_b('ZBgo.easyops.local/contracts/protorepo-models/easyops/model/topology'),
serialized_pb=_b('\n\x0eproperty.proto\x12\x08topology\x1a\x30\x63\x61pacity_admin_sdk/model/topology/strategy.proto\x1a\x35\x63\x61pacity_admin_sdk/model/topology/cmdb_instance.proto\"\x87\x01\n\x08Property\x12\x10\n\x08objectId\x18\x01 \x01(\t\x12\x12\n\ninstanceId\x18\x02 \x01(\t\x12$\n\x08strategy\x18\x03 \x01(\x0b\x32\x12.topology.Strategy\x12/\n\x0frelateInstances\x18\x04 \x03(\x0b\x32\x16.topology.CmdbInstanceBDZBgo.easyops.local/contracts/protorepo-models/easyops/model/topologyb\x06proto3')
,
dependencies=[capacity__admin__sdk_dot_model_dot_topology_dot_strategy__pb2.DESCRIPTOR,capacity__admin__sdk_dot_model_dot_topology_dot_cmdb__instance__pb2.DESCRIPTOR,])
_PROPERTY = _descriptor.Descriptor(
name='Property',
full_name='topology.Property',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='objectId', full_name='topology.Property.objectId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='topology.Property.instanceId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='strategy', full_name='topology.Property.strategy', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relateInstances', full_name='topology.Property.relateInstances', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=134,
serialized_end=269,
)
_PROPERTY.fields_by_name['strategy'].message_type = capacity__admin__sdk_dot_model_dot_topology_dot_strategy__pb2._STRATEGY
_PROPERTY.fields_by_name['relateInstances'].message_type = capacity__admin__sdk_dot_model_dot_topology_dot_cmdb__instance__pb2._CMDBINSTANCE
DESCRIPTOR.message_types_by_name['Property'] = _PROPERTY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Property = _reflection.GeneratedProtocolMessageType('Property', (_message.Message,), {
'DESCRIPTOR' : _PROPERTY,
'__module__' : 'property_pb2'
# @@protoc_insertion_point(class_scope:topology.Property)
})
_sym_db.RegisterMessage(Property)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
49716f6e7d9a2cc68a255f009d7b531e59438bda | e01c5d1ee81cc4104b248be375e93ae29c4b3572 | /Sequence1/Graphs/graph.py | 39a00a0adeee465dc7b472d71fc8475cd0ec817a | [] | no_license | lalitzz/DS | 7de54281a34814601f26ee826c722d123ee8bd99 | 66272a7a8c20c0c3e85aa5f9d19f29e0a3e11db1 | refs/heads/master | 2021-10-14T09:47:08.754570 | 2018-12-29T11:00:25 | 2018-12-29T11:00:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py | class Vertex:
def __init__(self, key):
self.id = key
self.connections = {}
def addNeighbour(self, nb, weight = 0):
self.connections[nb] = weight
def getConnections(self):
return self.connections.keys()
def getId(self):
return self.id
def getWeight(self, nb):
return self.connections[nb]
def __str__(self):
return str(self.id) + ' connected to: ' + str([x.id for x in self.connections])
class Graph:
def __init__(self):
self.vertexList = {}
self.numVertices = 0
def addVertex(self, key):
self.numVertices += 1
newVertex = Vertex(key)
self.vertexList[key] = newVertex
return newVertex
def getVertex(self, n):
if n in self.vertexList:
return self.vertexList[n]
else:
return None
def addEdge(self, f, t, cost = 0):
if f not in self.vertexList:
nv = self.addVertex(f)
if t not in self.vertexList:
nv = self.addVertex(t)
self.vertexList[f].addNeighbour(self.vertexList[t], cost)
def getVertices(self):
return self.vertexList.keys()
def __iter__(self):
return iter(self.vertexList)
def __contains__(self, n):
return sn in self.vertexList
g = Graph()
for i in range(6):
g.addVertex(i)
print(g.vertexList) | [
"lalit.slg007@gmail.com"
] | lalit.slg007@gmail.com |
15a0f2b2739bf63c905737134ef97e7e69c7b3ce | 37fbda71e4d38733d19b243522dcf8b47837e3a3 | /port/views2.py | b4c596eddc580cd91f49a0ca563eaf624fdb05bf | [] | no_license | chenhanfang/selfporttest | 381710f972be14c2bb9971c8cad9dc2e464f103b | d9a8fc0b967e596c19024be6dcf83e484215ec04 | refs/heads/master | 2021-01-23T11:32:29.105813 | 2017-06-09T08:19:01 | 2017-06-09T08:19:01 | 93,144,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | from django.shortcuts import render
from . import models
from port.models import Code
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
def porttest(request):###接口测试页
return render(request,'port/porttest.html')
| [
"chenhanfang@zhixunkeji.cn"
] | chenhanfang@zhixunkeji.cn |
aadadd426f2bb8fe1b92d9811dc80ed7f3ad9de2 | 2734b77a68f6d7e22e8b823418ad1c59fe1a34af | /opengever/core/upgrades/20211213102105_fix_inbox_document_workflow/upgrade.py | aa317497a9aff58522188bd839c6f903e6fa5eec | [] | no_license | 4teamwork/opengever.core | 5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1 | a01bec6c00d203c21a1b0449f8d489d0033c02b7 | refs/heads/master | 2023-08-30T23:11:27.914905 | 2023-08-25T14:27:15 | 2023-08-25T14:27:15 | 9,788,097 | 19 | 8 | null | 2023-09-14T13:28:56 | 2013-05-01T08:28:16 | Python | UTF-8 | Python | false | false | 191 | py | from ftw.upgrade import UpgradeStep
class FixInboxDocumentWorkflow(UpgradeStep):
"""Fix inbox document workflow.
"""
def __call__(self):
self.install_upgrade_profile()
| [
"e.schmutz@4teamwork.ch"
] | e.schmutz@4teamwork.ch |
3f0ccba4c2844d8f034f40b3e75d0d2caa97a224 | 6adf334dd2a074686447e15898ed3fff793aab48 | /04_Merge_Intervals/03_insert_interval.py | 32258a25e58b7d2d6edce2fbc7702efedd11b2ab | [] | no_license | satyapatibandla/Patterns-for-Coding-Interviews | 29ac1a15d5505293b83a8fb4acf12080851fe8d6 | b3eb2ac82fd640ecbdf3654a91a57a013be1806f | refs/heads/main | 2023-05-07T07:56:01.824272 | 2021-06-01T04:02:50 | 2021-06-01T04:02:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | # Time O(N) | Space O(N)
def insert_interval(intervals, new_interval):
result = []
i, start, end = 0, 0, 1
while i < len(intervals) and intervals[i][start] < new_interval[start]:
result.append(intervals[i])
i += 1
while i < len(intervals) and intervals[i][start] <= new_interval[end]:
new_interval[start] = min(new_interval[start], intervals[i][start])
new_interval[end] = max(new_interval[end], intervals[i][end])
i += 1
result.append(new_interval)
while i < len(intervals):
result.append(intervals[i])
i += 1
return result
def main():
print("Intervals after inserting the new interval: " + str(insert_interval([[1, 3], [5, 7], [8, 12]], [4, 6])))
print("Intervals after inserting the new interval: " + str(insert_interval([[1, 3], [5, 7], [8, 12]], [4, 10])))
print("Intervals after inserting the new interval: " + str(insert_interval([[2, 3], [5, 7]], [1, 4])))
if __name__ == '__main__':
main()
| [
"shash873@gmail.com"
] | shash873@gmail.com |
4dffc0713e1908c77083f44c7bd070453f1290e4 | fd48fba90bb227017ac2da9786d59f9b9130aaf0 | /digsby/src/mail/pop.py | b86f9f64de4eba5f634e9f527ae06adbef2a2608 | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | niterain/digsby | bb05b959c66b957237be68cd8576e3a7c0f7c693 | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | refs/heads/master | 2021-01-18T10:07:10.244382 | 2013-11-03T02:48:25 | 2013-11-03T02:48:25 | 5,991,568 | 1 | 0 | null | 2013-11-03T02:48:26 | 2012-09-28T02:24:50 | Python | UTF-8 | Python | false | false | 7,151 | py | '''
POP mail
'''
from mail.smtp import SMTPEmailAccount
from common import pref
from util import ResetTimer
from util.primitives.funcs import get
from logging import getLogger; log = getLogger('popmail'); info = log.info
from mail.emailobj import DecodedEmail
from mail.emailobj import Email
from traceback import print_exc
import email
from hashlib import sha1
from util.command_queue import CommandQueue, cmdqueue, callback_cmdqueue
class PopMail(SMTPEmailAccount):
protocol = 'pop'
default_timeout = 20
opening_email_marks_as_read = False
def __init__(self, **options):
d = self.default
self.popserver = options.get('popserver', '')
self.require_ssl = options.get('require_ssl', d('require_ssl'))
self.popport = options.get('popport', d('popport'))
self.uidlworks = None
self.topworks = True #assume it does until proven otherwise
self.cmdq = CommandQueue(start_hooks=[self._connect], end_hooks=[self._quit])
self.timeouttimer = ResetTimer(pref('pop.timeout',self.default_timeout), self.timeout_check)
SMTPEmailAccount.__init__(self, **options)
can_has_preview = True
def timeout_check(self):
log.info('Checking server connection for %r', self)
if self.state in (self.Statuses.OFFLINE, self.Statuses.ONLINE):
log.info('%s is not currently checking', self)
return True
if get(self, 'conn', False):
try:
self.conn.noop()
except:
self.on_error()
log.error('%s\'s server connection has failed', self)
return False
else:
log.error('%s has no conn attribute', self)
self.on_error()
return False
def update(self):
SMTPEmailAccount.update(self)
log.info('starting timeout timer')
self.timeouttimer.start()
self.real_update(success = self.finish_update)
def finish_update(self, updates):
import time
if self.state == self.Statuses.OFFLINE:
log.error('finish_update exiting early, state is %s', self.state)
return
(updated_emails, updated_count) = updates
log.info("%s got %d new messages %s", self, updated_count, time.ctime(time.time()))
#self.change_state(self.Statuses.ONLINE)
self._received_emails(updated_emails[:25], updated_count)
# if self.state in (self.Statuses.CONNECTING, self.Statuses.CHECKING):
# self.change_state(self.Statuses.ONLINE)
self.error_count = 0
log.info('stopping timeout timer')
self.timeouttimer.stop()
@callback_cmdqueue()
def real_update(self):
#self.change_state(self.Statuses.CHECKING)
if self.state == self.Statuses.OFFLINE:
return
conn = self.conn
num_emails, box_size = conn.stat()
num_emails = int(num_emails)
emails = []
def retr(mid):
if self.topworks:
try:
return conn.top(mid, 100)
except:
self.topworks = False
return conn.retr(mid)
uidl = conn.uidl()
if uidl[0].startswith("+"):
self.uidlworks = True
msg_tups = [tuple(tup.split()) for tup in uidl[1]][-25:]
for tup in msg_tups:
try:
mailmsg = retr(tup[0])
except Exception:
print_exc()
else:
try:
email_id = tup[1]
except IndexError:
email_id = None #someone had '1 ' -> ('1',) None seems to work fine.
emails.append(
Email.fromEmailMessage(email_id,
DecodedEmail(
email.message_from_string(
"\n".join(mailmsg[1])
))))
else:
self.uidlworks = False
num_to_get = min(num_emails, 25)
for i in xrange(num_to_get, max(num_to_get-25, -1), -1):
try:
mailmsg = retr(str(i))
except Exception:
print_exc()
else:
emailstring = "\n".join(mailmsg[1])
de = DecodedEmail(email.message_from_string(emailstring))
emails.append(Email.fromEmailMessage(
sha1(emailstring).hexdigest() + "SHA"+str(i)+"SHA", de))
return emails, num_emails
#self.change_state(self.Statuses.ONLINE)
# import time
# print num_emails, time.time()
def _connect(self):
if self.require_ssl:
from poplib import POP3_SSL as pop
else:
from poplib import POP3 as pop
try:
conn = pop(self.popserver, self.popport)
except Exception, e:
log.error('There was an error connecting: %s', e)
self.on_error()
raise
self.conn = conn
log.info(conn.user(self.name))
try:
password = self._decryptedpw().encode('utf-8')
log.info(conn.pass_(password))
except Exception, e:
log.error('Bad password: %s', e)
self._auth_error_msg = e.message
self.set_offline(self.Reasons.BAD_PASSWORD)
self.timer.stop()
raise
return conn
def _quit(self):
try:
self.conn.quit()
except Exception, e:
log.error('Error when disconnecting: %s', str(e))
if self.state != self.Statuses.ONLINE:
self.set_offline(self.Reasons.CONN_FAIL)
@cmdqueue()
def delete(self, msg):
SMTPEmailAccount.delete(self, msg)
conn = self.conn
if self.uidlworks:
uidl = conn.uidl()
#check if response is ok
mids = [mid for mid, uid in
[tuple(tup.split()) for tup in uidl[1]]
if uid == msg.id]
if mids:
mid = mids[0]
conn.dele(mid)
else:
hash, msgid, _ = msg.id.split("SHA")
newmsg = conn.retr(msgid)
#check if response is ok
newstring = "\n".join(newmsg[1])
newhash = sha1(newstring).hexdigest()
if hash == newhash:
conn.dele(msgid)
else:
num_emails, box_size = conn.stat()
num_emails = int(num_emails)
for i in xrange(num_emails):
emailhash = sha1("\n".join(conn.retr(str(i))[1])).hexdigest()
if hash == emailhash:
conn.dele(msgid)
break
def _get_options(self):
opts = SMTPEmailAccount._get_options(self)
opts.update(dict((a, getattr(self, a)) for a in
'popserver popport require_ssl'.split()))
return opts
| [
"mdougherty@tagged.com"
] | mdougherty@tagged.com |
71d9a83e464f82051a758c2d33a78ac365a5b19a | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_MovingMedian_Seasonal_MonthOfYear_SVR.py | ba538fdda129fca51e45b6327146afc395216a1b | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 173 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['MovingMedian'] , ['Seasonal_MonthOfYear'] , ['SVR'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
9dfe0767d9b0883a76e8e18ee55623ed1b008ff2 | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /xlsxwriter/test/worksheet/test_cond_format06.py | 22f05c90d94d6719ab020749a8069ae06371182b | [
"BSD-2-Clause-Views"
] | permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 3,825 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.write('A1', 10)
worksheet.write('A2', 20)
worksheet.write('A3', 30)
worksheet.write('A4', 40)
worksheet.conditional_format('A1:A4',
{'type': 'top',
'value': 10,
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'bottom',
'value': 10,
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'top',
'criteria': '%',
'value': 10,
'format': None,
})
worksheet.conditional_format('A1:A4',
{'type': 'bottom',
'criteria': '%',
'value': 10,
'format': None,
})
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:A4"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1">
<c r="A1">
<v>10</v>
</c>
</row>
<row r="2" spans="1:1">
<c r="A2">
<v>20</v>
</c>
</row>
<row r="3" spans="1:1">
<c r="A3">
<v>30</v>
</c>
</row>
<row r="4" spans="1:1">
<c r="A4">
<v>40</v>
</c>
</row>
</sheetData>
<conditionalFormatting sqref="A1:A4">
<cfRule type="top10" priority="1" rank="10"/>
<cfRule type="top10" priority="2" bottom="1" rank="10"/>
<cfRule type="top10" priority="3" percent="1" rank="10"/>
<cfRule type="top10" priority="4" percent="1" bottom="1" rank="10"/>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
0ef7954996e42a6d743569121fb8420071fc267d | 4bcc9806152542ab43fc2cf47c499424f200896c | /tensorflow/python/tpu/tests/tpu_embedding_v2_correctness_hd_ragged_forward_test.py | 61a0f48cc240f958811a8776deacdbf661a074ee | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | tensorflow/tensorflow | 906276dbafcc70a941026aa5dc50425ef71ee282 | a7f3934a67900720af3d3b15389551483bee50b8 | refs/heads/master | 2023-08-25T04:24:41.611870 | 2023-08-25T04:06:24 | 2023-08-25T04:14:08 | 45,717,250 | 208,740 | 109,943 | Apache-2.0 | 2023-09-14T20:55:50 | 2015-11-07T01:19:20 | C++ | UTF-8 | Python | false | false | 1,441 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU Embeddings mid level API on TPU."""
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.platform import test
from tensorflow.python.tpu.tests import tpu_embedding_v2_correctness_base_test
class TPUEmbeddingCorrectnessTest(
tpu_embedding_v2_correctness_base_test.TPUEmbeddingCorrectnessBaseTest):
@parameterized.parameters(
['sgd', 'adagrad', 'adam', 'ftrl', 'adagrad_momentum'])
def test_embedding(self, optimizer_name):
if optimizer_name != 'sgd':
self.skip_if_oss()
self._test_embedding(
optimizer_name, training=False, sparse=False, is_high_dimensional=True)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
25300826c517a97931ccdfcf3cbb34387deb7627 | 926621c29eb55046f9f59750db09bdb24ed3078e | /lib/surface/functions/call.py | 4c49d60775ab030dfbdc15919000d7c0e644be32 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/SDK | 525d9b29fb2e901aa79697c9dcdf5ddd852859ab | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | refs/heads/master | 2022-11-22T18:24:13.464605 | 2016-05-18T16:53:30 | 2016-05-18T16:53:30 | 282,322,505 | 0 | 0 | NOASSERTION | 2020-07-24T21:52:25 | 2020-07-24T21:52:24 | null | UTF-8 | Python | false | false | 2,026 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions call' command."""
from googlecloudsdk.api_lib.functions import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
class Call(base.Command):
"""Call function synchronously for testing."""
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'name', help='Name of the function to be called.',
type=util.ValidateFunctionNameOrRaise)
parser.add_argument(
'--data', default='',
help='Data passed to the function (JSON string)')
@util.CatchHTTPErrorRaiseHTTPException
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Function call results (error or result with execution id)
"""
project = properties.VALUES.core.project.Get(required=True)
# TODO(b/25364251): Use resource parser.
name = 'projects/{0}/regions/{1}/functions/{2}'.format(
project, args.region, args.name)
client = self.context['functions_client']
messages = self.context['functions_messages']
return client.projects_regions_functions.Call(
messages.CloudfunctionsProjectsRegionsFunctionsCallRequest(
name=name,
callFunctionRequest=messages.CallFunctionRequest(data=args.data)))
| [
"richarddewalhalla@gmail.com"
] | richarddewalhalla@gmail.com |
7d13549756c02e6602e0017aaad1b57e3a4fc73f | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayMerchantOrderExternalPaychannelSyncResponse.py | fdba4427402ff120549d81280ebc20667670f062 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 488 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayMerchantOrderExternalPaychannelSyncResponse(AlipayResponse):
def __init__(self):
super(AlipayMerchantOrderExternalPaychannelSyncResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayMerchantOrderExternalPaychannelSyncResponse, self).parse_response_content(response_content)
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
4f197bd896198dc6f3e5e1120b2b8ad4f934e6cd | 5cc0041f6f85852032c65749bae48e4599297777 | /Web_Crawler/ch2/ch2_18.py | 4f46a8649e3816946a3977e7a1bd8e6745e1b761 | [] | no_license | Phil-Gith/Study | 226f84a86a4a8bf12258dbddd4bb22b1999c8e5b | 240269afa7f91469e3943214b2f4e5961fec8694 | refs/heads/master | 2023-01-04T19:37:59.473684 | 2020-10-15T03:03:48 | 2020-10-15T03:03:48 | 256,115,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | # ch2_18.py
import csv
import matplotlib.pyplot as plt
from datetime import datetime
fn = 'TaipeiWeatherJan.csv'
with open(fn) as csvFile:
csvReader = csv.reader(csvFile)
headerRow = next(csvReader) # 讀取文件下一行
dates, highTemps = [], [] # 設定空串列
for row in csvReader:
highTemps.append(int(row[1])) # 儲存最高溫
currentDate = datetime.strptime(row[0], "%Y/%m/%d")
dates.append(currentDate)
plt.figure(dpi=80, figsize=(12, 8)) # 設定繪圖區大小
plt.plot(dates, highTemps) # 圖標增加日期刻度
plt.title("Weather Report, Jan. 2017", fontsize=24)
plt.xlabel("", fontsize=14)
plt.ylabel("Temperature (C)", fontsize=14)
plt.tick_params(axis='both', labelsize=12, color='red')
plt.show()
| [
"chhuanting@gmail.com"
] | chhuanting@gmail.com |
5bc62e0e2e3ec7a62672c600085786d1d11835ab | 3ba0413c1a935632425881d0a0104aef3fcfe7ec | /科学计算与时间/pandasTest2.py | 68f4aaaba6bf7eb3171a69a591c9310c7c5bdb72 | [] | no_license | lxc911008/Artificial-Intelligence | 624c9348bbfdb9a473b7c6c5dbd803d9f425cf79 | dd03f6e66c70ca4c24b3826ebd2deb51dfe66c36 | refs/heads/master | 2020-07-08T02:24:13.418944 | 2019-09-05T09:13:41 | 2019-09-05T09:13:41 | 203,538,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,339 | py | #!usr/bin/env python
#-*- coding:utf-8 _*-
"""
@author:LXC
@file: pandasTest2.py
@time: 2019/09/02
"""
import pandas as pd
'''
#DataFrame
#DataFrame是二维数据结构,即数据以行和列的表格方式排列
#功能特点:
# 潜在的列是不同的类型
# 大小可变
# 标记轴(行和列)
# 可以对行和列执行算术运算
#DataFrame构造函数:pandas.DataFrame(data,index,columns,dtype,copy)
# data:数据采取各种形式,如ndarry,series,map,lists,dict,constant和另一个DataFrame
# index:对于行标签,如果没有传递索引值,默认np.arrange(n)
# columns:对于列标签,如果没有传递索引值,默认np.arrange(n)
# dtype:每列的数据类型
# copy:默认值为False,此命令用于复制数据
#通过二维数组创建
arr = [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]
df = pd.DataFrame(arr)
print(df)
print(df.index)
print(df.columns)
print(df.values)
df = pd.DataFrame(arr,index = list('qwer'),columns = list('ASD'))
print(df)
#通过字典创建,每个key,value为每一列的数据
dict = {'a':[1,2,3], 'b':[4,5,6],'c':[7,8,9]}
df2 = pd.DataFrame(dict,index = list('DEF'))
print(df2)
#通过index,columns 可以重置行列索引,reset_index,reset_columns,删除行列索引
df2.index = ['k','v','d']
print(df2)
#索引对象:
#不管是Series还是DataFrame对象,都有索引对象
#索引对象负责管理轴标签和其他元数据
#通过索引可以从Series,DataFrame中获取值或者对某个索引值进行重新赋值
#Series或者DataFrame的自动对齐功能是通过索引实现的
arr = [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]
df = pd.DataFrame(arr,index = list('qwer'),columns = list('ASD'))
print(df)
#数据获取:
#列数据获取:直接通过列索引获取
#行数据获取:需要通过ix方法获取对应行索引数据,ix为旧方法,一般使用loc、iloc
print(df['S']) #获取一列
print(df[['A','D']]) #获取两列
df['G'] = [6,7,8,10] #增加列,也可以修改列,
print(df)
df.pop('S') #列删除
print(df)
#行操作
print(df.loc['q']) #获取一行
print(df.loc[['q','e']]) #获取多行
print(df.loc[['q','e'],['A','G']]) #获取多行多列
#类似可进行行添加,行修改,通过drop可进行行删除
df = df.drop('q') #需要重新赋值,行删除不会更新原数组
print(df)
'''
| [
"you@example.com"
] | you@example.com |
82e9ce7c2a08c7f61f09a056c04674c79615cc12 | f0bb020e5e31a4aee953ca79b859590a5d97cf2c | /django_app/blog/migrations/0001_initial.py | 4edafc4b00e2845a84e57e36a65c5de31e36cb81 | [] | no_license | YunhoJung/djangogirls-Tutorial2 | 0d699df86a51414bfbb89bd61c9ede3d42bdee80 | ff6b55e2392e210efd329ce01a2c8573339eb2a2 | refs/heads/master | 2021-01-22T12:25:37.227801 | 2017-05-30T04:55:44 | 2017-05-30T04:55:44 | 92,724,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-30 01:36
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"saijdu2198@gmail.com"
] | saijdu2198@gmail.com |
7ca4dc8a99a59be4b601f5e534d123f4b73bc444 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/comm/telnetinst.py | 09dcd6dc82e4f398037421e1cead3beefd82b780 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 7,202 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class TelnetInst(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = ClassMeta("cobra.model.comm.TelnetInst")
meta.moClassName = "commTelnetInst"
meta.rnFormat = "telnetinst"
meta.category = MoCategory.REGULAR
meta.label = "Telnet Service Inst"
meta.writeAccessMask = 0x3
meta.readAccessMask = 0x3
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.tag.Tag")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
meta.childClasses.add("cobra.model.tag.Annotation")
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.top.System")
meta.superClasses.add("cobra.model.comm.Comp")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Comp")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.comm.Shell")
meta.rnPrefixes = [
('telnetinst', False),
]
prop = PropMeta("str", "adminSt", "adminSt", 28510, PropCategory.REGULAR)
prop.label = "Admin State"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 2
prop.defaultValueStr = "disabled"
prop._addConstant("disabled", "disabled", 2)
prop._addConstant("enabled", "enabled", 1)
meta.props.add("adminSt", prop)
prop = PropMeta("str", "annotation", "annotation", 38601, PropCategory.REGULAR)
prop.label = "Annotation. Suggested format orchestrator:value"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("annotation", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "extMngdBy", "extMngdBy", 40740, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "undefined"
prop._addConstant("msc", "msc", 1)
prop._addConstant("undefined", "undefined", 0)
meta.props.add("extMngdBy", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 28508, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
prop.defaultValue = "telnet"
prop.defaultValueStr = "telnet"
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "port", "port", 28509, PropCategory.REGULAR)
prop.label = "Port"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 65535)]
prop.defaultValue = 23
prop.defaultValueStr = "23"
meta.props.add("port", prop)
prop = PropMeta("str", "proto", "proto", 1300, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "tcp"
prop._addConstant("all", "all", 3)
prop._addConstant("none", "none", 0)
prop._addConstant("tcp", "tcp", 1)
prop._addConstant("udp", "udp", 2)
meta.props.add("proto", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Fabric"
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
fe2c50b9178fb07cf4ac84ed50234e6ccee3ea5c | a6b0e4b92a404e2a14591f025f908a8a567f1e17 | /pgm/representation/MN.py | ba861a6419c3726088884e6e6107e82a9ac7425b | [
"MIT"
] | permissive | koriavinash1/pgm | 6cdde0aa447503837ef74e4d34d2799a5e0a5b8f | 89e11b61f7141a75d8991ff4ea229ef66d7a4a0c | refs/heads/master | 2022-07-10T15:55:20.672986 | 2020-06-30T05:44:13 | 2020-06-30T05:44:13 | 242,904,930 | 4 | 2 | MIT | 2022-06-22T01:50:20 | 2020-02-25T03:56:24 | Python | UTF-8 | Python | false | false | 4,200 | py | import numpy as np
class Graph(object):
"""
Adjancency list and Adjancency matrix
representation of the graph based on the input
"""
def __init__(self, type='list'):
"""
type: captures the type of graph
allowed values ['list', 'matrix']
"""
self.type = type.lower()
if self.type not in ['list', 'matrix']:
raise ValueError("Invalid type found, allowed values are: ['list', 'matrix']")
self.graph = {'node': [],
'node_idx': [],
'node_weightage': [],
'edge': [],
'edge_weightage': []}
def add_node(self, node, weightage=1):
"""
adds node in the graph also helps in
assigning the weightage to each node
node: can be ['int', 'str']
weightage: ['int' or 'float']
"""
if not node in self.graph['node']:
node_idx = len(self.graph['node'])
self.graph['node'].append(node)
self.graph['node_weightage'].append(weightage)
self.graph['node_idx'].append(node_idx)
self.graph['edge'].append([])
self.graph['edge_weightage'].append([])
else:
node_idx = self.graph['node_idx'][np.where(np.array(self.graph['node']) == node)[0][0]]
return node_idx
def add_edge(self, nodeA, nodeB, edge_weightage=1, nodeA_weightage=1, nodeB_weightage=1):
"""
adds a directed edge from nodeA to nodeB in
the graph also helps in assigning the weightage
to each edge
nodeA -> nodeB
nodeA: can be ['int', 'str']
nodeB: can be ['int', 'str']
nodeA_weightage: ['int' or 'float']
nodeA_weightage: ['int' or 'float']
edge_weightage: ['int' or 'float']
"""
nodeA_idx = self.add_node(nodeA, nodeA_weightage)
nodeB_idx = self.add_node(nodeB, nodeB_weightage)
self.graph['edge'][nodeA_idx].append(nodeB_idx)
self.graph['edge_weightage'][nodeA_idx].append(edge_weightage)
def delete_edge(self, nodeA, nodeB):
"""
deletes the directed edge from nodeA to nodeB
function will remoce edge: nodeA -> nodeB
nodeA: can be ['int', 'str']
nodeB: can be ['int', 'str']
"""
nodeA_idx = self.graph['node_idx'][np.where(np.array(self.graph['node']) == nodeA)[0][0]]
nodeB_idx = self.graph['node_idx'][np.where(np.array(self.graph['node']) == nodeB)[0][0]]
edge_idx = np.where(np.array(self.graph['edge'][nodeA_idx]) == nodeB_idx)[0][0]
self.graph['edge'][nodeA_idx].pop(edge_idx)
self.graph['edge_weightage'][nodeA_idx].pop(edge_idx)
def delete_node(self, node):
"""
deletes the node from the graph
also removes all the edges connecting to that specific node
example:
nodeA -> nodeB <- nodeC
deleteing nodeB will remove
edges:
nodeA -> nodeB
nodeC -> nodeB
node:
nodeB
node: can be ['int', 'str']
"""
node_idx = self.graph['node_idx'][np.where(np.array(self.graph['node']) == node)[0][0]]
for nidx, eidxs in enumerate(self.graph['edge']):
for j, eidx in enumerate(eidxs):
if eidx == node_idx:
self.graph['edge'][nidx].pop(j)
self.graph['edge_weightage'][nidx].pop(j)
for j, eidx in enumerate(eidxs):
if eidx > node_idx:
self.graph['edge'][nidx][j] = self.graph['edge'][nidx][j] - 1
for j in self.graph['node_idx'][node_idx + 1:]:
self.graph['node_idx'][j] = j - 1
self.graph['node'].pop(node_idx)
self.graph['node_idx'].pop(node_idx)
self.graph['edge'].pop(node_idx)
self.graph['edge_weightage'].pop(node_idx)
self.graph['node_weightage'].pop(node_idx)
| [
"koriavinash1@gmail.com"
] | koriavinash1@gmail.com |
aa8ad5d953b4effedd92cc7e8c0979f43318e818 | 8ae5b5495e2223216dbfc4d65d8abe9920d5448f | /test/integration/conftest.py | 01db91f58b5faec0cc94e96fc94c6c43aba8af4a | [
"MIT"
] | permissive | neunkasulle/flynt | 6c09d41e978868f393cb82182ef81d18d20b1634 | 7f73c7cb4d3e00cc65d9e676583f532f3ed9b302 | refs/heads/master | 2020-07-03T06:16:09.429070 | 2019-08-09T14:00:30 | 2019-08-09T14:01:55 | 201,815,878 | 0 | 0 | null | 2019-08-11T21:17:28 | 2019-08-11T21:17:28 | null | UTF-8 | Python | false | false | 1,323 | py | import pytest
all_files = pytest.fixture(params=[
"CantAffordActiveException.py",
"all_named.py",
"def_empty_line.py",
"digit_ordering.py",
"dict_func.py",
"double_conv.py",
"first_string.py",
"hard_percent.py",
"implicit_concat.py",
"implicit_concat_comment.py",
"implicit_concat_named1.py",
"implicit_concat_named2.py",
"indexed_fmt_name.py",
"indexed_percent.py",
"long.py",
"multiline.py",
"multiline_1.py",
"multiline_2.py",
"multiline_3.py",
"multiline_twice.py",
"multiple.py",
"named_inverse.py",
"no_fstring_1.py",
"no_fstring_2.py",
"percent_numerics.py",
"percent_op.py",
"percent_strings.py",
"raw_string.py",
"regression_flask.py",
"simple.py",
"simple_comment.py",
"simple_docstring.py",
"simple_format.py",
"simple_format_double_brace.py",
"simple_indent.py",
"simple_percent.py",
"simple_percent_comment.py",
"simple_start.py",
"simple_str_newline.py",
"simple_str_return.py",
"simple_str_tab.py",
"slash_quotes.py",
"some_named.py",
"string_in_string.py",
"tuple_in_list.py",
"two_liner.py"
])
# @pytest.fixture(params=["double_conv.py"])
@all_files
def filename(request):
yield request.param | [
"ikkamens@amazon.com"
] | ikkamens@amazon.com |
e0c375a7c9aeb8bdb737e925027a1d23aa4a0137 | 375c05dc48eacb4bdaabe8930bd39c133ff8c159 | /web/tudata/fetcher/fetch_Transaction.py | 6df4767604c4bb2df129462a14088c737bff84c5 | [] | no_license | wangshoujunnew/ai_qi | e68e719b772c9762b64c68bd4537793df8be089a | a6dc1cd326410f522af59b657f99b675c54b4880 | refs/heads/master | 2021-07-17T00:36:28.977847 | 2021-07-15T23:41:48 | 2021-07-15T23:41:48 | 132,225,071 | 0 | 0 | null | 2018-05-05T07:27:20 | 2018-05-05T07:27:20 | null | UTF-8 | Python | false | false | 2,004 | py | import pandas as pd
import tushare as ts
from utils.strutils import nextDayStr, nextMinStr
from ..db import read_sql, TN_TRANSACTION_D, TN_TRANSACTION_5MIN
'''
个股历史交易记录
相关接口: get_hist_data
__tablename__ = 'transaction'
id = Column(Integer, primary_key=True, autoincrement=True)
# 代码
code = Column(Integer, index=True)
# 日期 eg: 2017-11-24
date = Column(String, index=True)
# 开盘价 eg: 24.10
open = Column(Float)
# 最高价 eg: 24.70
high = Column(Float)
# 收盘价 eg: 24.45
close = Column(Float)
# 最低价 eg: 24.09
low = Column(Float)
# 成交量 eg: 53160.52
volume = Column(Float)
# 价格变动 eg: 0.43
price_change = Column(Float)
# 涨跌幅 eg: 1.79
p_change = Column(Float)
# 5日均价 eg: 24.822
ma5 = Column(Float)
# 10日均价 eg: 26.441
ma10 = Column(Float)
# 20日均价 eg: 28.300
ma20 = Column(Float)
# 5日均量 eg: 80676.85
v_ma5 = Column(Float)
# 10日均量 eg: 117984.89
v_ma10 = Column(Float)
# 20日均量 eg: 175389.32
v_ma20 = Column(Float)
# 换手率 eg: 1.33
turnover = Column(Float)
'''
def start_date(code, ktype):
try:
newest_date = get_newest_date(code, ktype)
if ktype is '5':
r = nextMinStr(newest_date)
else:
r = nextDayStr(newest_date)
return r
except:
return None
# @cache()
def fetch_transaction(code, date, ktype):
'''
获取交易数据
:param code:
:param date:
:param ktype:
:return:
'''
df = ts.get_hist_data(code, start=date, ktype=ktype)
if df is not None:
df['code'] = code
return df
def get_newest_date(code, ktype='D'):
'''
获取最新的记录日期
:param code:
:param ktype:
:return:
'''
if ktype is '5':
table = TN_TRANSACTION_5MIN
else:
table = TN_TRANSACTION_D
df = read_sql("select date from '%s' where code = '%s'" % (table, str(code)))
# 当前记录中 当前股票的最新记录日期
return df['date'][0]
| [
"lijianan@hujiang.com"
] | lijianan@hujiang.com |
b57ad36b236cc2b3aba872cc84e39ffef40188dd | 9745e5d8acae70bcdd7011cc1f81c65d3f5eed22 | /problem-solving/Implementation/Encryption/solutions.py | edc1efb3c322941806726ff9e9af711e1ec42f2e | [] | no_license | rinleit/hackerrank-solutions | 82d71b562d276ec846ab9a26b3e996c80172f51e | 519a714c5316892dce6bd056b14df5e222078109 | refs/heads/master | 2022-11-10T05:08:11.185284 | 2020-07-02T01:34:35 | 2020-07-02T01:34:35 | 254,403,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the encryption function below.
def encryption(s):
n = math.sqrt(len(s))
row, col = math.floor(n), math.ceil(n)
encryption = []
for i in range(0, len(s), col):
temp_str = s[i:i+col].ljust(col, " ")
encryption += [temp_str]
results = map(lambda s: ''.join(s).strip(), list(zip(*encryption)))
return " ".join(results)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = encryption(s)
fptr.write(result + '\n')
fptr.close()
| [
"rinle.it@gmail.com"
] | rinle.it@gmail.com |
9553e17b83697e7bda70cb8d700e4a7ed33ba705 | 1b614c43af7e636c6bcdf828af4e795b4df1daee | /system/base/v86d/actions.py | 4610e02dcb06573508f5a53d9ba3b6ad5896e61b | [] | no_license | pisilinux/core | 1a86b88de182bfcf05fc79a3329644d83379bc11 | b4eb5a301f185f0e644f32544a25f4ed68b6e158 | refs/heads/master | 2023-08-25T10:46:54.168655 | 2023-08-22T20:25:16 | 2023-08-22T20:25:16 | 31,252,264 | 38 | 116 | null | 2023-09-14T16:09:40 | 2015-02-24T09:09:49 | Python | UTF-8 | Python | false | false | 525 | py | # -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.rawConfigure("--without-klibc \
--with-x86emu")
def build():
autotools.make("KDIR=/usr")
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("AUTHORS", "ChangeLog", "README")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
0400fa60ce904d84a8b6414c1c017b47b0d07668 | 47128c6ff1277eedf851670d33f7a288fdfe2246 | /win32/focus_window.py | 2d29b8632382ea0d70d2bd2269cc675ef1419135 | [] | no_license | chati757/python-learning-space | 5de7f11a931cf95bc076473da543331b773c07fb | bc33749254d12a47523007fa9a32668b8dc12a24 | refs/heads/master | 2023-08-13T19:19:52.271788 | 2023-07-26T14:09:58 | 2023-07-26T14:09:58 | 83,208,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import win32gui
import win32con
def focus_window(title):
hwnd = win32gui.FindWindow(None, title)
if hwnd:
win32gui.ShowWindow(hwnd, win32con.SW_RESTORE)
win32gui.SetForegroundWindow(hwnd)
else:
print("Window with title '{}' not found!".format(title))
# Example usage:
focus_window("book_val_graph : test")
| [
"chati757@users.noreply.github.com"
] | chati757@users.noreply.github.com |
8db4f80121ff82a3a2b3c63c663b8382b802b3e9 | 21a92e72448715510d509ab0ec07af37f388013a | /book/problems/multOrPlus.py | 3a85af55bcf1e10f4cbaab0dde979c4ce1b5909e | [] | no_license | chlee1252/dailyLeetCode | 9758ad5a74997672129c91fb78ecc00092e1cf2a | 71b9e3d82d4fbb58e8c86f60f3741db6691bf2f3 | refs/heads/master | 2023-01-22T02:40:55.779267 | 2020-12-03T15:01:07 | 2020-12-03T15:01:07 | 265,159,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | def solution(S):
result = 0
for num in S:
num = int(num)
if num < 2 or result < 2:
result += num
else:
result *= num
# print(result)
return result
print(solution("1111"))
print(solution("011111"))
print(solution("567"))
print(solution("02984"))
print(solution("012")) | [
"chlee1252@gmail.com"
] | chlee1252@gmail.com |
abf68e96f3cae06820575d013f7b29a5b835f095 | 178fc17fdf36a52265807e4c48c80a40727e6c78 | /Python/detect_html/detect.py | d08e5c24889cd3eceb04c3e26a040cdad9226317 | [] | no_license | Algorant/HackerRank | da61820e7b2ffe274203b9fd8f4348c79a1a0451 | 259fd39747898c157144891df26a1d0b812f2a46 | refs/heads/master | 2023-02-03T06:41:46.035515 | 2020-12-20T02:18:15 | 2020-12-20T02:18:15 | 285,880,833 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | '''
Print the HTML tags and attribute values in order of their
occurrence from top to bottom in the given snippet
'''
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
print(tag)
for attr in attrs:
print("->", attr[0], ">", attr[1])
parser = MyHTMLParser()
for i in range(int(input())):
parser.feed(input())
| [
"Skantastico@gmail.com"
] | Skantastico@gmail.com |
7a9e78e6b93e0680774b4624d14bdd1a8ec0be7c | bcc199a7e71b97af6fbfd916d5a0e537369c04d9 | /leetcode/solved/85_Maximal_Rectangle/solution.py | 7438d49f85d870ec1bf91452b9ec12ee791a3528 | [] | no_license | sungminoh/algorithms | 9c647e82472905a2c4e505c810b622b734d9d20d | 1389a009a02e90e8700a7a00e0b7f797c129cdf4 | refs/heads/master | 2023-05-01T23:12:53.372060 | 2023-04-24T06:34:12 | 2023-04-24T06:34:12 | 87,406,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,119 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <smoh2044@gmail.com>
#
# Distributed under terms of the MIT license.
"""
Given a rows x cols binary matrix filled with 0's and 1's, find the largest rectangle containing only 1's and return its area.
Example 1:
Input: matrix = [["1","0","1","0","0"],["1","0","1","1","1"],["1","1","1","1","1"],["1","0","0","1","0"]]
Output: 6
Explanation: The maximal rectangle is shown in the above picture.
Example 2:
Input: matrix = [["0"]]
Output: 0
Example 3:
Input: matrix = [["1"]]
Output: 1
Constraints:
rows == matrix.length
cols == matrix[i].length
1 <= row, cols <= 200
matrix[i][j] is '0' or '1'.
"""
import sys
from typing import List
import pytest
class Solution:
def maximalRectangle(self, matrix):
"""08/14/2018 22:15"""
def largestRectangleArea(heights):
ret = 0
stack = []
heights.append(0)
for i, h in enumerate(heights):
while stack and heights[stack[-1]] >= h:
ph = heights[stack.pop()]
w = i - (stack[-1] if stack else -1) - 1
ret = max(ret, ph * w)
stack.append(i)
return ret
if not matrix:
return 0
h, w = len(matrix), len(matrix[0])
m = [[0]*w for _ in range(h)]
ret = 0
heights = [0] * w
for i in range(h):
for j in range(w):
heights[j] = (heights[j] + 1) if int(matrix[i][j]) == 1 else 0
ret = max(ret, largestRectangleArea(heights))
return ret
def maximalRectangle(self, matrix: List[List[str]]) -> int:
if not matrix or not matrix[0]:
return 0
m, n = len(matrix), len(matrix[0])
def find_max_area(nums):
nums += [0]
ret = 0
stack = []
for i, n in enumerate(nums):
while stack and nums[stack[-1]] >= n:
height = nums[stack.pop()]
j = -1 if not stack else stack[-1]
width = (i-1) - j
ret = max(ret, width*height)
stack.append(i)
return ret
ret = 0
row = [0]*n
for i in range(m):
for j in range(n):
if matrix[i][j] == '0':
row[j] = 0
else:
row[j] += 1
ret = max(ret, find_max_area(row))
return ret
@pytest.mark.parametrize('matrix, expected', [
([["1","0","1","0","0"],["1","0","1","1","1"],["1","1","1","1","1"],["1","0","0","1","0"]], 6),
([["0"]], 0),
([["1"]], 1),
([["0","0","1"],["1","1","1"]], 3),
([["0","1","1","0","1"],
["1","1","0","1","0"],
["0","1","1","1","0"],
["1","1","1","1","0"],
["1","1","1","1","1"],
["0","0","0","0","0"]], 9),
])
def test(matrix, expected):
assert expected == Solution().maximalRectangle(matrix)
if __name__ == '__main__':
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
| [
"smoh2044@gmail.com"
] | smoh2044@gmail.com |
c543f3d0c1f2509782e52b90f80b8f5dd7672456 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/320/usersdata/280/89051/submittedfiles/lecker.py | 772a59e3b7905b2305c3d504790e7ce7f9ece420 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # -*- coding: utf-8 -*-
import math
n1=int(input("Digite o 1º número: "))
n2=int(input("Digite o 2º número: "))
n3=int(input("Digite o 3º número: "))
n4=int(input("Digite o 4º número: "))
cont=0
if n1>n2:
cont=cont + 1
if n2>n3 and n2>n1:
cont=cont + 1
if n3>n4 and n3>n2:
cont=cont + 1
if n4>n3:
cont=cont + 1
if cont <=1:
print("S")
if cont > 1:
print("N") | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c3136dacb8b11aba8a6556840042479ed14b5368 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/SSD/tools/eval_metric.py | dd4ba6835e90aa972f64ba7f0abdb998eacae91d | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 3,129 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import mmcv
from mmcv import Config, DictAction
from mmdet.datasets import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
help='Format the output results without perform evaluation. It is'
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='Evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
def main():
    """Evaluate (or just format) detection results previously saved in pickle format.

    Loads the config and the pickled outputs, then either formats the results
    for submission (--format-only) or runs dataset.evaluate() with the
    requested metrics (--eval). Raises ValueError on an invalid flag
    combination.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # Explicit check instead of `assert`: asserts are stripped when Python
    # runs with -O, so a missing operation would otherwise go unreported.
    if not (args.eval or args.format_only):
        raise ValueError(
            'Please specify at least one operation (eval/format the results) with '
            'the argument "--eval", "--format-only"')
    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    cfg.data.test.test_mode = True
    dataset = build_dataset(cfg.data.test)
    outputs = mmcv.load(args.pkl_results)
    kwargs = {} if args.eval_options is None else args.eval_options
    if args.format_only:
        dataset.format_results(outputs, **kwargs)
    if args.eval:
        eval_kwargs = cfg.get('evaluation', {}).copy()
        # hard-code way to remove EvalHook args
        for key in ['interval', 'tmpdir', 'start', 'gpu_collect']:
            eval_kwargs.pop(key, None)
        eval_kwargs.update(dict(metric=args.eval, **kwargs))
        print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
    main()
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
5da66ebf5494eab70d995002f3374ddd1c2a3cc1 | 1686d1e263a77c44f2e711450bcd32fc1feb94c3 | /MySite001/src/CS/urls.py | 36462b2376dc0f68d80191d9463bf93fe227ff90 | [] | no_license | wesamalnobani/Django_My_Exercises | 7297e41bc2f47d854d0d62dd489b7eb08365ffd2 | 10a4accebf03df5e869843f7bac941afe93e6509 | refs/heads/master | 2020-04-22T06:23:36.865853 | 2019-02-11T19:32:49 | 2019-02-11T19:32:49 | 162,743,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | from django.urls import path
from .views import home,test01,test02,test03,test04,page01,page02,page03,create_user,backend01,update_user,backend02,backend03
# URL routing table for the CS app: static pages, parameterized test views,
# and the user-form/backend endpoints. Route names are used for reverse().
urlpatterns = [
    path('',home,name='Index'),
    path('Python/',page01,name='Python'),
    path('C++/',page02,name='C++'),
    path('JavaScript/',page03,name='JavaScript'),
    path('test01/', test01,name='test01'),
    # <int:...> / <str:...> converters pass URL segments as view arguments
    path('test02/<int:n1>/<int:n2>',test02,name='test02'),
    path('test03/<str:a>/<int:b>',test03,name='test03'),
    path('test04/<str:a>/<int:b>/<int:c>',test04,name='test04'),
    path('Form_Users/',create_user,name='Form_Users'),
    path('backend01/',backend01,name='backend01'),
    path('update_user/<int:id>/',update_user,name='update_user'),
    path('backend02/<int:id>',backend02,name='backend02'),
    path('backend03/<int:id>',backend03,name='backend03'),
]
| [
"wesam.alnobani@gmail.com"
] | wesam.alnobani@gmail.com |
f35e3a1f17961d8a4d45a9d205b7169a7add01e8 | 6d45ba4adff74b2cb1b6764dc684f37407b41ba9 | /PirateBoxMessageBoard/message/forms.py | b3fcbd48a2a4cea507ac333c599047e345d8e20a | [] | no_license | bussiere/PirateBoxMessageBoard | bbf478af1886caf811f38802bde5528593bba2c4 | 8626a8a44d5bdbf06486fac65682a50e4209396d | refs/heads/master | 2021-01-23T11:56:22.905167 | 2013-03-08T16:33:08 | 2013-03-08T16:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | from django import forms
class MessageForm(forms.Form):
    """Form for posting a message: author pseudonym, short description and body."""
    Pseudo = forms.CharField(max_length=100)
    Description = forms.CharField(max_length=100)
    # Free-form message body, rendered as a 20x20 textarea.
    Message = forms.CharField(widget=forms.Textarea(attrs={'cols': 20, 'rows': 20}))
| [
"bussiere@gmail.com"
] | bussiere@gmail.com |
17e8d0fbafe8a71799bcab09f7c00a4a0e7adb30 | 415b5c3ccf75b55c39dadba0773419a21ebeb9d1 | /moff/node/list_item_node.py | 93ba44b8eea6857424c91589a9335210d2665e77 | [
"MIT"
] | permissive | Tikubonn/moff | d8e5e2fedd5b343883050ae79c26a85e22a4d7f1 | 1c363f60959138311068177fca177d0f0dc97380 | refs/heads/master | 2020-05-17T05:23:59.110858 | 2019-06-07T03:47:21 | 2019-06-07T03:47:21 | 183,534,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py |
from .template import CollectionNode
class ListItemNode(CollectionNode):
    """Collection node that renders itself as an HTML list item."""

    def write(self, stream):
        """Serialize this node's children to *stream*, wrapped in <li>...</li>.

        Overrides CollectionNode.write, which emits the children themselves.
        """
        stream.write("<li>")
        super().write(stream)
        stream.write("</li>")
| [
"https://twitter.com/tikubonn"
] | https://twitter.com/tikubonn |
616d7543696d84644ca888794262ebffdc7033c9 | a9dc42e9f54b549fcdd695817e347abfd8f2869f | /snap_scripts/downscaling_10min/downscaling_10min_OLD_KEEP/wrap_downscaler_cmip5_slurm.py | eb049f4160cd2c77edd6a55e7ab458c034cdd5ab | [
"MIT"
] | permissive | yusheng-wang/downscale | 2e77d070115ead3034c154d29f1c533976228f13 | 3fe8ea1774cf82149d19561ce5f19b25e6cba6fb | refs/heads/master | 2023-04-10T03:25:08.806859 | 2019-09-21T17:34:35 | 2019-09-21T17:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,628 | py | # # # # #
# wrap downscaler for running on slurm
# # # # #
def run_model( fn, base_dir, variable, model, scenario, units, metric ):
    """Write a SLURM batch script to *fn* and submit it with sbatch.

    fn -- path of the .slurm file to create.
    base_dir, variable, model, scenario, units, metric -- forwarded as
        command-line arguments to downscale_cmip5.py inside the job script.

    Returns 1 unconditionally (legacy convention in this script collection).
    """
    import subprocess  # was `import os, subprocess`; os was never used here
    head = '#!/bin/sh\n' + \
            '#SBATCH --ntasks=32\n' + \
            '#SBATCH --nodes=1\n' + \
            '#SBATCH --ntasks-per-node=32\n' + \
            '#SBATCH --account=snap\n' + \
            '#SBATCH --mail-type=FAIL\n' + \
            '#SBATCH --mail-user=malindgren@alaska.edu\n' + \
            '#SBATCH -p main\n'
    script_path = '/workspace/UA/malindgren/repos/downscale/snap_scripts/downscaling_10min/downscale_cmip5.py'
    with open( fn, 'w' ) as f:
        command = ' '.join([ 'ipython', script_path,\
                '--', '-b', base_dir, '-m', model, '-v', variable, '-s', scenario, '-u', units, '-met', metric ])
        # write() states the intent for a single string; writelines() only
        # worked by accident (it iterated the string character by character).
        f.write( head + "\n" + command + '\n' )
    subprocess.call([ 'sbatch', fn ])
    return 1
if __name__ == '__main__':
    # NOTE(review): glob and subprocess appear unused in this block -- confirm
    # before removing.
    import os, glob, itertools, subprocess
    base_dir = '/workspace/Shared/Tech_Projects/DeltaDownscaling/project_data'
    models = [ 'GFDL-CM3', 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'CCSM4' ]
    variables = [ 'tas','pr' ]
    scenarios = [ 'historical', 'rcp26', 'rcp45', 'rcp60', 'rcp85' ]
    # SLURM scripts and logs are written under this directory.
    path = os.path.join( base_dir,'downscaled_10min','slurm_log' )
    if not os.path.exists( path ):
        os.makedirs( path )
    os.chdir( path )
    # Submit one job per (variable, model, scenario) combination.
    for variable, model, scenario in itertools.product( variables, models, scenarios ):
        if variable == 'pr':
            units = 'mm'
            metric = 'total'
        else:
            units = 'C'
            metric = 'mean'
        fn = os.path.join( path, 'slurm_run_downscaler_'+'_'.join([variable, model, scenario])+'.slurm' )
        _ = run_model( fn, base_dir, variable, model, scenario, units, metric )
| [
"lindgren.mike@gmail.com"
] | lindgren.mike@gmail.com |
e4ca8247c56c5c251f62479a4a6b66f802ab823b | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3_neat/16_0_3_zeebonk_main.py | f644acaf0f26faca4d4ab13b693e4f0cc6eb29a1 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 966 | py | import fileinput
from itertools import product
from math import sqrt
def get_devisors(item):
    """For each base 2..10, interpret *item* as digits in that base and
    collect the smallest divisor found below (roughly) its square root.

    item -- a string or sequence of '0'/'1' digit characters.
    Returns the list of divisors found, one per base where one was found.

    Note: the scan range `range(2, int(sqrt(number)))` can miss divisors
    near the square root; that is the original contest behavior and is
    deliberately preserved.
    """
    digits = ''.join(item)  # normalize once; the old code re-joined per base
    devisors = []
    for base in range(2, 11):
        number = int(digits, base)
        for i in range(2, int(sqrt(number))):
            if number % i == 0:
                devisors.append(i)
                break
    return devisors
if __name__ == '__main__':
    lines = fileinput.input()
    next(lines)  # skip the test-case count on the first line
    for case, line in enumerate(lines):
        length, amount = tuple(map(int, line.strip().split(' ')))
        results = []
        # Candidate "jamcoins": strings of 0/1 of the given length that start
        # and end with '1'; keep those with a divisor in every base 2..10.
        for item in product(['0', '1'], repeat=length - 2):
            item = ['1'] + list(item) + ['1']
            divisors = get_devisors(item)
            if len(divisors) != 9:
                continue
            results.append((item, divisors))
            if len(results) >= amount:
                break
        print "Case #%d:" % (case + 1)
        for item, de in results:
            print ''.join(item), ' '.join(str(d) for d in de)
        # 100011 5 13 147 31 43 1121 73 77 629
        # 111111 21 26 105 1302 217 1032 513 13286 10101
        # 111001 3 88 5 1938 7 208 3 20 11
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
f844ff6861d32d92c4dd2c95ef5ce3058bc9e100 | 3d5bcd57b893c95bbcbfafe77bbc33c65432c9ed | /Algorithms/LeetCode/L0043multiply.py | 4b26d99cfae10351fa46ef39c154f388950b80a0 | [] | no_license | arunachalamev/PythonProgramming | c160f34c7cb90e82cd0d4762ff9dcb4abadf9c1c | ea188aaa1b72511aeb769a2829055d0aae55e73e | refs/heads/master | 2021-06-04T03:50:37.976293 | 2020-11-12T19:52:28 | 2020-11-12T19:52:28 | 97,364,002 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py |
def multiply(num1, num2):
    """Multiply two non-negative integers given as decimal strings (LeetCode 43).

    Uses grade-school long multiplication: digit products accumulate into a
    result array, carrying into the next-higher position as we go, then the
    leading zeros are stripped. Returns the product as a string.
    """
    product = [0] * (len(num1) + len(num2))
    position = len(product)-1
    for x in num1[::-1]:
        temp = position
        for y in num2[::-1]:
            product[temp] += int(x) * int(y)
            # carry immediately so each cell stays a single decimal digit
            product[temp-1] += product[temp] // 10
            product[temp] = product[temp] % 10
            temp -= 1
        position -= 1
    # (debug print of the raw digit array removed)
    pos = 0
    # skip leading zeros, but keep at least one digit so "0" * "0" -> "0"
    while pos < len(product)-1 and product[pos] == 0:
        pos += 1
    return ''.join(map(str, product[pos:]))
print (multiply('12','11'))
| [
"arunachalamev@gmail.com"
] | arunachalamev@gmail.com |
340da06a46d358ee325be8fc8e4307cbe9cd76ee | 956b32fe691b6e364464b4e4bf796c306ce26783 | /tests/fixture_data/server_groups.py | 553bc57bc7ac5e92537a3b68be6bc0d1f73cf08a | [] | no_license | zzpu/novaclient | 8be4c26dc0587d927f1116a2b9ff1b841987a3a0 | 4040cae0ce62d35c4f128686be67871536200ac5 | refs/heads/master | 2021-01-12T10:23:49.413848 | 2016-12-14T09:05:52 | 2016-12-14T09:05:52 | 76,442,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,517 | py | #coding:utf-8
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.openstack.common import jsonutils
from novaclient.tests.fixture_data import base
class Fixture(base.Fixture):
    """Registers canned HTTP responses for the os-server-groups API endpoints."""
    base_url = 'os-server-groups'
    def setUp(self):
        super(Fixture, self).setUp()
        # Canned server groups served by the list endpoint; the first entry is
        # also reused for the single-resource endpoints registered below.
        server_groups = [
            {
                "members": [],
                "metadata": {},
                "id": "2cbd51f4-fafe-4cdb-801b-cf913a6f288b",
                "policies": [],
                "name": "ig1"
            },
            {
                "members": [],
                "metadata": {},
                "id": "4473bb03-4370-4bfb-80d3-dc8cffc47d94",
                "policies": ["anti-affinity"],
                "name": "ig2"
            },
            {
                "members": [],
                "metadata": {"key": "value"},
                "id": "31ab9bdb-55e1-4ac3-b094-97eeb1b65cc4",
                "policies": [], "name": "ig3"
            },
            {
                "members": ["2dccb4a1-02b9-482a-aa23-5799490d6f5d"],
                "metadata": {},
                "id": "4890bb03-7070-45fb-8453-d34556c87d94",
                "policies": ["anti-affinity"],
                "name": "ig2"
            }
        ]
        headers = {'Content-Type': 'application/json'}
        self.requests.register_uri('GET', self.url(),
                                   json={'server_groups': server_groups},
                                   headers=headers)
        server = server_groups[0]
        server_j = jsonutils.dumps({'server_group': server})
        # Helper: register one canned single-group response for the given
        # HTTP method and optional URL suffix segments.
        def _register(method, *args):
            self.requests.register_uri(method, self.url(*args), text=server_j)
        _register('POST')
        _register('POST', server['id'])
        _register('GET', server['id'])
        _register('PUT', server['id'])
        _register('POST', server['id'], '/action')
        self.requests.register_uri('DELETE', self.url(server['id']),
                                   status_code=202)
| [
"719184289@qq.com"
] | 719184289@qq.com |
0280fd778cf57e2418830a4132806e8fa7817cc0 | 70f6e78d471a91323b11f730c6f04afee177daf1 | /tools/pytorch_load.py | 8db5c4e4fe9219b478048120ad9e171dee3818e7 | [] | no_license | orange-eng/Image_enhancement | 72a6e2cf30c8e536d8d57c0de5643d94cadce2d9 | 2fcfdb9a4a05d86d1255e250fbfd46ab740589fd | refs/heads/main | 2023-04-20T01:47:43.896157 | 2021-05-17T08:20:53 | 2021-05-17T08:20:53 | 323,057,119 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,404 | py | import torch
import torch.nn as nn
class Net_old(nn.Module):
    """Toy CNN built as a single nn.Sequential stored under the name `nets`.

    Layer stack: Conv(1->2, k3) -> ReLU -> Conv(2->1, k3) -> ReLU -> Conv(1->1, k3).
    The attribute name `nets` is part of the state_dict key layout, so it must
    not change.
    """
    def __init__(self):
        super(Net_old, self).__init__()
        layers = [
            torch.nn.Conv2d(1, 2, 3),
            torch.nn.ReLU(True),
            torch.nn.Conv2d(2, 1, 3),
            torch.nn.ReLU(True),
            torch.nn.Conv2d(1, 1, 3),
        ]
        self.nets = nn.Sequential(*layers)

    def forward(self, x):
        """Run the input through the sequential stack."""
        out = self.nets(x)
        return out
class Net_new(nn.Module):
    """Same toy CNN as Net_old, but with each layer as a named attribute.

    Bug fix: the constructor previously called ``super(Net_old, self).__init__()``
    (copy-paste from Net_old), which raises ``TypeError: super(type, obj)``
    because a Net_new instance is not a Net_old. It must name Net_new itself.
    """
    def __init__(self):
        super(Net_new, self).__init__()  # was super(Net_old, self) -- wrong class
        self.conv1 = torch.nn.Conv2d(1, 2, 3)
        self.r1 = torch.nn.ReLU(True)
        self.conv2 = torch.nn.Conv2d(2, 1, 3)
        self.r2 = torch.nn.ReLU(True)
        self.conv3 = torch.nn.Conv2d(1, 1, 3)

    def forward(self, x):
        """Apply conv1 -> relu -> conv2 -> relu -> conv3 in order."""
        x = self.conv1(x)
        x = self.r1(x)
        x = self.conv2(x)
        x = self.r2(x)
        x = self.conv3(x)
        return x
# Demo: save a model's parameters, reload them, and walk the module tree.
network = Net_old()
torch.save(network.state_dict(), 't.pth') # save the model's parameters to the file t.pth
pretrained_net = torch.load('t.pth') # pretrained_net is an OrderedDict holding the saved parameters
#print(pretrained_net)
# for key, v in enumerate(pretrained_net):
#     print(key, v)
i = 0
# named_modules() yields the network itself plus every submodule, depth-first.
for name, module in network.named_modules():
    i = i + 1
    print(i,module)
    for p_name, p_tensor in module.named_parameters():
        print(p_name)
# for key, v in enumerate(pretrained_net):
# print(key,v) | [
"972353371@qq.com"
] | 972353371@qq.com |
89de725f6f8efd1df760f3d62641dc5d9b26ac12 | ea4ca37cf33ffb6e90c492802f61c2ec303e52f6 | /BinaryPattern/train_binary.py | e2281b5b3c57826d1df26df926819ab469dc662c | [] | no_license | webstorage119/Iris_pattern_classification | a59ac27864078c5a2475a4bfcf743ce412c09aa9 | 6050627f39034638239b83ccc9f752a8557ff0e9 | refs/heads/master | 2023-04-08T18:50:46.723478 | 2021-04-18T05:38:55 | 2021-04-18T05:38:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,545 | py | from BinaryPattern.util.constants import *
from BinaryPattern.util.dataloader import DataLoader
from BinaryPattern.util.output import makeoutput, make_dir
from BinaryPattern.util.gendataloader import ImageGenerator
from keras import backend as K
from tensorflow.keras.losses import binary_crossentropy, categorical_crossentropy, sparse_categorical_crossentropy
from CNNUtil.customcallback import CustomCallback
from CNNModels.VGG.model.smallervggnet import SmallerVGGNet
from CNNModels.VGG.model.vgg16v1 import VGG_16
from CNNModels.MobileNet.model.mobilenet import MobileNetBuilder
from albumentations import (Compose,
HorizontalFlip, VerticalFlip, ShiftScaleRotate,
RandomRotate90, Transpose, RandomSizedCrop, RandomContrast, RandomGamma, RandomBrightness)
# Input tensor layout depends on the Keras backend's image data format.
input_shape = (FLG.HEIGHT, FLG.WIDTH, FLG.DEPTH)
if K.image_data_format() == "channels_first":
    input_shape = (FLG.DEPTH, FLG.HEIGHT, FLG.WIDTH)
# Output folders for saved models and validation reports.
make_dir('./output/'+FLG.PROJECT_NAME + '/model_saved')
make_dir('./output/'+FLG.PROJECT_NAME + '/validation_Report')
# model, model_size = efficientNet_factory('efficientnet-b1', load_weights=None, input_shape=(FLG.WIDTH, FLG.HEIGHT, FLG.DEPTH), classes=2)
# Build the classifier; alternative architectures tried earlier are kept
# commented out below, with their parameter counts for reference.
model = SmallerVGGNet.build(width=FLG.WIDTH, height=FLG.HEIGHT,
                            depth=FLG.DEPTH, classes=FLG.CLASS_NUM, finalAct="softmax")
# ====== SmallerVGGNet ====
# Total params: 29,777,794
# Trainable params: 29,774,914
# Non-trainable params: 2,880
# model = SmallerVGGNet.buildv2(input_shape=input_shape, classes=2, finalAct="softmax")
# ====== SmallerVGGNet 2 ====
# `Total params: 7,663,618
# Trainable params: 7,659,970
# Non-trainable params: 3,648
# -----> 120 epoch 에서도 acc 0.3.....
# model = SmallerVGGNet.buildv3(input_shape=input_shape, classes=2, finalAct="softmax")
# ====== SmallerVGGNet 2 ====
# Total params: 7,657,218
# Trainable params: 7,655,106
# Non-trainable params: 2,112
# -----> 120 epoch 에서도 acc 0.4
# model = VGG_16(width=FLG.WIDTH, height=FLG.HEIGHT, depth=FLG.DEPTH, classes=2)
# model = model = ResnetBuilder.build_resnet_152(input_shape, 2)
# model = MobileNetBuilder.build_mobilenet_v1(input_shape=input_shape, classes=2)
# model = MobileNetBuilder.build_mobilenet_v2(input_shape=input_shape, classes=2)
# ====== mobilenet_v2 ====
# Total params: 1,029,010
# Trainable params: 1,017,986
# Non-trainable params: 11,024
model.summary()
model.compile(loss=categorical_crossentropy, optimizer='rmsprop', metrics=['accuracy'])
list_callbacks = CustomCallback.callback(FLG.PATIENCE, FLG.CKPT_W)
# Albumentations pipelines: heavy augmentation for training, light for validation.
AUGMENTATIONS_TRAIN = Compose([
    HorizontalFlip(p=0.5), VerticalFlip(p=0.5), ShiftScaleRotate(p=0.8),
    RandomRotate90(p=0.8), Transpose(p=0.5),
    RandomContrast(p=0.5), RandomGamma(p=0.5), RandomBrightness(p=0.5)])
AUGMENTATIONS_TEST = Compose([
    VerticalFlip(p=0.5)])
dataLoader = {
    'TrainGenerator': ImageGenerator(FLG.DATA_DIR + '/train', augmentations=AUGMENTATIONS_TRAIN),
    'ValGenerator': ImageGenerator(FLG.DATA_DIR + '/test', augmentations=AUGMENTATIONS_TEST)}
train_generator = dataLoader.get('TrainGenerator')
val_generator = dataLoader.get('ValGenerator')
hist = model.fit_generator(train_generator,
                           validation_data=val_generator,
                           steps_per_epoch=len(train_generator)*1,
                           validation_steps=len(val_generator)*1,
                           epochs=FLG.EPOCHS, verbose=1, callbacks=list_callbacks)
# Final hold-out evaluation (NOTE(review): the validation set doubles as the
# test set here -- confirm that is intended).
x_val, y_val = DataLoader.test_load_data(FLG.DATA_DIR + '/test')
makeoutput(x_val, y_val, model, hist) | [
"jslee_314@naver.com"
] | jslee_314@naver.com |
104e7f1a42f192499b372b5bdaea9b13d982c273 | 12d00d6452e19db2f9b7ec1f4bb2574bf2a0f125 | /core/tests/gui/docprops_view_test.py | b2ff52d5d8e6350e1bd4eb5ef38762dc38467648 | [] | no_license | prodigeni/moneyguru | 578f9dfd5542b59d9b01ede1988bdd346c619472 | f8700112c85ca005e52a3c460775d543e219534e | refs/heads/master | 2021-01-16T21:22:26.930380 | 2014-01-25T19:13:23 | 2014-01-25T19:13:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,138 | py | # Created On: 2011/10/13
# Copyright 2014 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from datetime import date
from hscommon.testutil import eq_
from ...model.date import MonthRange
from ..base import ApplicationGUI, TestApp, with_app
#---
def app_props_shown():
    """Fixture: build a TestApp with the document-properties view already shown."""
    app = TestApp()
    app.dpview = app.show_dpview()
    return app
@with_app(app_props_shown)
def test_first_weekday_pref(app):
    """Changing the first-weekday property re-buckets the weekly bar graphs."""
    # changing the first weekday affects the bar graphs as expected
    app.add_account('Asset')
    app.add_txn('31/12/2007', 'entry0', from_='Asset', to='Expense', amount='42')
    app.add_txn('1/1/2008', 'entry1', from_='Asset', to='Expense', amount='100')
    app.add_txn('20/1/2008', 'entry2', from_='Asset', to='Expense', amount='200')
    app.add_txn('31/3/2008', 'entry3', from_='Asset', to='Expense', amount='150')
    app.show_account('Expense')
    app.doc.date_range = MonthRange(date(2008, 1, 1))
    app.clear_gui_calls()
    app.dpview.first_weekday_list.select(1) # tuesday
    # The month conveniently starts on a tuesday, so the data now starts from the 1st of the month
    expected = [('01/01/2008', '08/01/2008', '100.00', '0.00'),
                ('15/01/2008', '22/01/2008', '200.00', '0.00')]
    eq_(app.bar_graph_data(), expected)
    app.bargraph_gui.check_gui_calls(['refresh'])
    app.dpview.first_weekday_list.select(6) # sunday
    expected = [('30/12/2007', '06/01/2008', '142.00', '0.00'),
                ('20/01/2008', '27/01/2008', '200.00', '0.00')]
    eq_(app.bar_graph_data(), expected)
@with_app(app_props_shown)
def test_props_are_doc_based(app, tmpdir):
    """Document properties survive a save/load cycle (stored in the doc, not prefs)."""
    # properties on dpview are document based, which means that they're saved in the document itself,
    # not in the preferences
    app.dpview.currency_list.select(42)
    app.dpview.first_weekday_list.select(4)
    app.dpview.ahead_months_list.select(5)
    app.dpview.year_start_month_list.select(6)
    fn = str(tmpdir.join('foo.moneyguru'))
    # We don't use TestApp.save_and_load() because we don't want to keep the app_gui instance,
    # which contains preference, to be sure that the data is actually doc-based
    app.doc.save_to_xml(fn)
    app = TestApp()
    app.doc.load_from_xml(fn)
    dpview = app.show_dpview()
    eq_(dpview.currency_list.selected_index, 42)
    eq_(dpview.first_weekday_list.selected_index, 4)
    eq_(dpview.ahead_months_list.selected_index, 5)
    eq_(dpview.year_start_month_list.selected_index, 6)
@with_app(app_props_shown)
def test_setting_prop_makes_doc_dirty(app):
    """Changing a document property marks the document as having unsaved changes."""
    assert not app.doc.is_dirty()
    app.dpview.first_weekday_list.select(4)
    assert app.doc.is_dirty()
@with_app(app_props_shown)
def test_set_default_currency(app):
    """A txn entered under one default currency keeps it after the default changes."""
    app.dpview.currency_list.select(1) # EUR
    app.add_txn(amount='1')
    dpview = app.show_dpview()
    dpview.currency_list.select(0) # back to USD
    tview = app.show_tview()
    eq_(tview.ttable[0].amount, 'EUR 1.00')
| [
"hsoft@hardcoded.net"
] | hsoft@hardcoded.net |
1fddc8f60e25dbe5246c52241828e9f8ef7f8794 | c5d87c7f25e3fe9b17c1e88993b0ed6831e52acb | /N_NumpyDemo/UfuncDemo.py | 80361efe0f9891d4c53759aaa4147977d789136b | [] | no_license | GIS90/python_base_use | e55d55f9df505dac45ddd332fb65dcd08e8e531f | 7166ca85975bb7c56a5fbb6b723fd8300c4dd5d1 | refs/heads/master | 2020-04-02T08:33:49.461307 | 2018-10-23T03:33:41 | 2018-10-23T03:33:41 | 154,249,857 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | # -*- coding: utf-8 -*-
import time
import math
import numpy as np
# Benchmark 1: pure-Python loop applying math.sin to a million elements.
# NOTE(review): time.clock() is wall/CPU time depending on platform and was
# removed in Python 3.8 -- this script targets Python 2.
x = [i for i in xrange(1000 * 1000)]
start = time.clock()
for i, t in enumerate(x):
    x[i] = math.sin(t)
print "math.sin:", time.clock() - start
# Benchmark 2: numpy's vectorized sin ufunc, writing in place (out=x).
x = [i for i in xrange(1000 * 1000)]
x = np.array(x)
start = time.clock()
np.sin(x, x)
print "numpy.sin:", time.clock() - start
# Element-wise arithmetic ufuncs on small arrays.
x = np.arange(1, 4)
y = np.arange(2, 5)
print np.add(x, y)
print np.subtract(y, x)
print np.multiply(x, y)
print np.divide(y, x)
print np.true_divide(y, x)
print np.floor_divide(y, x)
| [
"mingliang.gao@qunar.com"
] | mingliang.gao@qunar.com |
c380d5414e074af53a8f6f50bebabf3b4b3c1744 | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/nltk/sentiment/__init__.py | a36490373306397a161f0d5c1dca7b6295b80110 | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # Natural Language Toolkit: Sentiment Analysis
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
NLTK Sentiment Analysis Package
"""
from nltk.sentiment.sentiment_analyzer import SentimentAnalyzer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
| [
"msaineti@icloud.com"
] | msaineti@icloud.com |
c2da014b695577b316d74dae1b9c2c907de8ed3e | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /PE8XQipGLS5bhpLZ5_2.py | e60bb0dda75d0b0ea0f996fcc89dad839dde6e66 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | """
Programmer Pete is trying to create a function that returns `True` if two
lists share the same length and have identical numerical values at every
index, otherwise, it will return `False`.
However, the solution his function gives is in an unexpected format. Can you
fix Pete's function so that it behaves as seen in the examples below?
### Examples
check_equals([1, 2], [1, 3]) ➞ False
check_equals([1, 2], [1, 2]) ➞ True
check_equals([4, 5, 6], [4, 5, 6]) ➞ True
check_equals([4, 7, 6], [4, 5, 6]) ➞ False
check_equals([1, 12], [11, 2]) ➞ False
### Notes
Check the **Resources** tab for more info.
"""
def check_equals(lst1, lst2):
    """Return True when *lst1* and *lst2* have the same length and equal
    values at every index, otherwise False.

    List equality (``==``) already compares length and elements pairwise, so
    the original full-slice copies (``lst1[::] == lst2[::]``) were redundant
    allocations with identical semantics.
    """
    return lst1 == lst2
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
33e909cc2a7d877d650bf04726e1d963b32915bd | 8ab1a4b890d7583b6f21db0fc862fdb9d22e6664 | /src/command_modules/azure-cli-documentdb/azure/cli/command_modules/documentdb/commands.py | 3c290f2181135c5236f626414c5913f761b2087d | [
"MIT"
] | permissive | nnasaki/azure-cli | 278c70a20dbfac49801b7b65ae88a578e7a6ee6d | 8b22ba4a0205a46ce0b07e92c6bf398d8d48b41f | refs/heads/master | 2021-01-13T06:58:53.360882 | 2017-02-08T23:40:47 | 2017-02-08T23:40:47 | 81,398,010 | 0 | 0 | null | 2017-02-09T02:10:28 | 2017-02-09T02:10:27 | null | UTF-8 | Python | false | false | 1,994 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#pylint: disable=line-too-long
from azure.cli.core.commands import cli_command
from azure.cli.command_modules.documentdb._client_factory import (cf_documentdb)
# Template for resolving SDK operation paths used by the generic commands below.
custom_path = 'azure.mgmt.documentdb.operations.database_accounts_operations#{}'
# Thin pass-throughs to the management SDK's DatabaseAccountsOperations.
cli_command(__name__, 'documentdb show', custom_path.format('DatabaseAccountsOperations.get'), cf_documentdb)
cli_command(__name__, 'documentdb list', custom_path.format('DatabaseAccountsOperations.list_by_resource_group'), cf_documentdb)
cli_command(__name__, 'documentdb list-all', custom_path.format('DatabaseAccountsOperations.list'), cf_documentdb)
cli_command(__name__, 'documentdb list-keys', custom_path.format('DatabaseAccountsOperations.list_keys'), cf_documentdb)
cli_command(__name__, 'documentdb list-read-only-keys', custom_path.format('DatabaseAccountsOperations.list_read_only_keys'), cf_documentdb)
cli_command(__name__, 'documentdb regenerate-key', custom_path.format('DatabaseAccountsOperations.regenerate_key'), cf_documentdb)
cli_command(__name__, 'documentdb check-name-exists', custom_path.format('DatabaseAccountsOperations.check_name_exists'), cf_documentdb)
cli_command(__name__, 'documentdb delete', custom_path.format('DatabaseAccountsOperations.delete'), cf_documentdb)
cli_command(__name__, 'documentdb failover-priority-change', custom_path.format('DatabaseAccountsOperations.failover_priority_change'), cf_documentdb)
# create/update need extra argument massaging, so they route to local custom handlers.
cli_command(__name__, 'documentdb create', 'azure.cli.command_modules.documentdb.custom#cli_documentdb_create', cf_documentdb)
cli_command(__name__, 'documentdb update', 'azure.cli.command_modules.documentdb.custom#cli_documentdb_update', cf_documentdb)
| [
"tjprescott@users.noreply.github.com"
] | tjprescott@users.noreply.github.com |
1f851925cb90f92e112fa28d1468096ce839b9fb | 12346be5075d772878a6015053d6eeb4e7227acc | /12. Polymorphism and Magic Methods - Exercise/wild_farm_04/project/animals/mammals.py | 857d5068bee87a3a5832c985a1c0c49a0c7ad4c8 | [
"MIT"
] | permissive | elenaborisova/Python-OOP | 2a46bfafce868f03481fb699580fb3e60ca4e3bd | 584882c08f84045b12322917f0716c7c7bd9befc | refs/heads/main | 2023-04-02T17:41:23.440617 | 2021-04-10T13:56:38 | 2021-04-10T13:56:38 | 321,376,083 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | from wild_farm_04.project.animals.animal import Mammal
from wild_farm_04.project.food import Meat, Vegetable, Fruit, Food
class Mouse(Mammal):
    """A mouse: eats vegetables and fruit, gaining WEIGHT_INCREASE per unit of quantity."""
    WEIGHT_INCREASE = 0.10

    def make_sound(self):
        """Return the mouse's sound."""
        return "Squeak"

    def feed(self, food: Food):
        """Feed *food* to the mouse; returns a refusal message for inedible food."""
        if isinstance(food, (Vegetable, Fruit)):
            self.weight += Mouse.WEIGHT_INCREASE * food.quantity
            self.food_eaten += food.quantity
        else:
            return f"{self.__class__.__name__} does not eat {food.__class__.__name__}!"
class Dog(Mammal):
    """A dog: eats meat only, gaining WEIGHT_INCREASE per unit of quantity."""
    WEIGHT_INCREASE = 0.40

    def make_sound(self):
        """Return the dog's sound."""
        return "Woof!"

    def feed(self, food: Food):
        """Feed *food* to the dog; returns a refusal message for anything but meat."""
        if isinstance(food, Meat):
            self.weight += Dog.WEIGHT_INCREASE * food.quantity
            self.food_eaten += food.quantity
        else:
            return f"{self.__class__.__name__} does not eat {food.__class__.__name__}!"
class Cat(Mammal):
    """A cat: eats meat and vegetables, gaining WEIGHT_INCREASE per unit of quantity."""
    WEIGHT_INCREASE = 0.30

    def make_sound(self):
        """Return the cat's sound."""
        return "Meow"

    def feed(self, food: Food):
        """Feed *food* to the cat; returns a refusal message for inedible food."""
        if isinstance(food, (Meat, Vegetable)):
            self.weight += Cat.WEIGHT_INCREASE * food.quantity
            self.food_eaten += food.quantity
        else:
            return f"{self.__class__.__name__} does not eat {food.__class__.__name__}!"
class Tiger(Mammal):
    """A tiger: eats meat only, gaining WEIGHT_INCREASE per unit of quantity."""
    WEIGHT_INCREASE = 1.00

    def make_sound(self):
        """Return the tiger's sound."""
        return "ROAR!!!"

    def feed(self, food: Food):
        """Feed *food* to the tiger; returns a refusal message for anything but meat."""
        if isinstance(food, Meat):
            self.weight += Tiger.WEIGHT_INCREASE * food.quantity
            self.food_eaten += food.quantity
        else:
            return f"{self.__class__.__name__} does not eat {food.__class__.__name__}!"
| [
"elenaborrisova@gmail.com"
] | elenaborrisova@gmail.com |
dcbd1aecb5497cf2e12342d8438f9d1b378500ac | ce6d74994bce49411f00f5053f56fb3b7c30bd50 | /page/structure.py | 0859c21afc62bb61cf2ff3825444c4050b0bca3d | [] | no_license | zhengjiani/pyAlgorithm | 9397906f3c85221e64f0415abfbb64d03eb1c51e | dbd04a17cf61bac37531e3337ba197c4af19489e | refs/heads/master | 2021-07-11T19:07:26.480403 | 2020-07-16T00:25:24 | 2020-07-16T00:25:24 | 179,308,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | # -*- coding: utf-8 -*-
# @Time : 2019/5/14 21:26
# @Author : zhengjiani
# @Software: PyCharm
# @Blog :https://zhengjiani.github.io/
def main():
    """Demonstrate common str and list operations (tutorial script)."""
    str1 = 'hello,world!'
    # Get a copy of the string with its first letter capitalized
    print(str1.capitalize())
    # Get an upper-cased copy of the string
    print(str1.upper())
    # Find the position of a substring within the string
    print(str1.find('or'))
    print(str1.find('shit'))  # -1 (not found)
    # Check whether the string starts with the given prefix
    print(str1.startswith('hel'))
    # ...and whether it ends with the given suffix
    print(str1.endswith('!'))
    list1 = [1, 3, 5, 7, 100]
    # Remove all elements from the list
    list1.clear()
    fruits = ['grape', 'apple', 'strawberry', 'waxberry']
    fruits += ['pitaya', 'pear', 'mango']
    # A full slice produces a copy of the list
    fruits3 = fruits[:]
    # A reverse slice produces a reversed copy of the list
    fruits5 = fruits[::-1]
    list1 = ['orange', 'apple', 'zoo', 'internationalization', 'blueberry']
    # sorted() returns a sorted copy without modifying the list passed in
    list2 = sorted(list1)
    # The key keyword argument sorts by string length instead of the default alphabetical order
    list4 = sorted(list1, key=len)
list3 = sorted(list1,reverse=True) | [
"936089353@qq.com"
] | 936089353@qq.com |
7fcaad79d528734e5298e107e368f264d53eb48b | 7c6ba4a791b437f59c5c3d7cb3df4c8b38b582e1 | /btclib/tests/test_mnemonic.py | 86c55a9498821411113876499cba6d9f5698b842 | [
"MIT"
] | permissive | cryptobuks1/btclib | 7183cfe2796ab0f8c0ba20a3268d32957967f9ea | 652b6f9f543fc4274bf4f9401f41390e6ae89549 | refs/heads/master | 2022-05-26T07:03:44.103226 | 2020-04-29T22:33:41 | 2020-04-29T22:33:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,917 | py | #!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
import unittest
from btclib.mnemonic import (_entropy_from_indexes, _indexes_from_entropy,
_indexes_from_mnemonic, _mnemonic_from_indexes)
class TestMnemonic(unittest.TestCase):
    """Round-trip tests for the private mnemonic/index/entropy helpers."""

    def test_1(self):
        """Mnemonic <-> indexes <-> entropy round-trips with the English wordlist."""
        lang = "en"
        test_mnemonic = "ozone drill grab fiber curtain grace " \
                        "pudding thank cruise elder eight picnic"
        test_indexes = [1268, 535, 810, 685, 433, 811,
                        1385, 1790, 421, 570, 567, 1313]
        # mnemonic <-> indexes round-trip
        indexes = _indexes_from_mnemonic(test_mnemonic, lang)
        self.assertEqual(indexes, test_indexes)
        mnemonic = _mnemonic_from_indexes(test_indexes, lang)
        self.assertEqual(mnemonic, test_mnemonic)
        # indexes -> entropy -> indexes round-trip
        entropy = _entropy_from_indexes(test_indexes, lang)
        indexes = _indexes_from_entropy(entropy, lang)
        self.assertEqual(indexes, test_indexes)
        # Boundary word indexes (0 and 2047): leading zero indexes must
        # survive the entropy round-trip. (This block was duplicated
        # verbatim in the original; the copy has been removed.)
        test_indexes = [0, 0, 2047, 2047, 2047, 2047,
                        2047, 2047, 2047, 2047, 2047, 0]
        entropy = _entropy_from_indexes(test_indexes, lang)
        indexes = _indexes_from_entropy(entropy, lang)
        self.assertEqual(indexes, test_indexes)
if __name__ == "__main__":
    # execute only if run as a script
    unittest.main()  # pragma: no cover
| [
"ferdinando@ametrano.net"
] | ferdinando@ametrano.net |
59d64c4b3cc30cace99d3b6206019add63c53717 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/lobby/customization/__init__.py | 41bc51145ebce250458ecaa725eacc26a0918529 | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,515 | py | # 2017.02.03 21:49:42 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/customization/__init__.py
from gui.Scaleform.locale.VEHICLE_CUSTOMIZATION import VEHICLE_CUSTOMIZATION
from gui.app_loader.settings import APP_NAME_SPACE
from gui.shared import EVENT_BUS_SCOPE
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.framework import GroupedViewSettings, ViewTypes, ScopeTemplates
from gui.Scaleform.framework.package_layout import PackageBusinessHandler
def getContextMenuHandlers():
    """Return the context-menu handlers exposed by this package (none)."""
    handlers = ()
    return handlers
def getViewSettings():
    """Build the Scaleform view settings registered by this package."""
    # Imported lazily, matching the original module's import style.
    from gui.Scaleform.daapi.view.lobby.customization.filter_popover import FilterPopover
    from gui.Scaleform.daapi.view.lobby.customization.purchase_window import PurchaseWindow
    filterPopover = GroupedViewSettings(
        VIEW_ALIAS.CUSTOMIZATION_FILTER_POPOVER, FilterPopover,
        'customizationFiltersPopoverView.swf', ViewTypes.WINDOW,
        VIEW_ALIAS.CUSTOMIZATION_FILTER_POPOVER,
        VIEW_ALIAS.CUSTOMIZATION_FILTER_POPOVER, ScopeTemplates.DEFAULT_SCOPE)
    purchaseWindow = GroupedViewSettings(
        VIEW_ALIAS.CUSTOMIZATION_PURCHASE_WINDOW, PurchaseWindow,
        'customizationBuyWindow.swf', ViewTypes.TOP_WINDOW,
        'customizationBuyWindow', None, ScopeTemplates.DEFAULT_SCOPE)
    return (filterPopover, purchaseWindow)
# Localized label keys for the camouflage kinds; list position is
# presumably the engine's camouflage-kind index (winter/summer/desert)
# -- TODO confirm against the callers.
CAMOUFLAGES_KIND_TEXTS = [VEHICLE_CUSTOMIZATION.CAMOUFLAGE_WINTER, VEHICLE_CUSTOMIZATION.CAMOUFLAGE_SUMMER, VEHICLE_CUSTOMIZATION.CAMOUFLAGE_DESERT]
# Localized label keys for per-nation camouflages; ordering presumably
# follows the game's nation index order -- TODO confirm.
CAMOUFLAGES_NATIONS_TEXTS = [VEHICLE_CUSTOMIZATION.CAMOUFLAGE_NATION_USSR,
 VEHICLE_CUSTOMIZATION.CAMOUFLAGE_NATION_GERMANY,
 VEHICLE_CUSTOMIZATION.CAMOUFLAGE_NATION_USA,
 VEHICLE_CUSTOMIZATION.CAMOUFLAGE_NATION_CHINA,
 VEHICLE_CUSTOMIZATION.CAMOUFLAGE_NATION_FRANCE,
 VEHICLE_CUSTOMIZATION.CAMOUFLAGE_NATION_UK,
 VEHICLE_CUSTOMIZATION.CAMOUFLAGE_NATION_JAPAN,
 VEHICLE_CUSTOMIZATION.CAMOUFLAGE_NATION_CZECH]
def getBusinessHandlers():
    # One handler instance wires this package's view aliases into the lobby.
    return (CustomizationPackageBusinessHandler(),)
class CustomizationPackageBusinessHandler(PackageBusinessHandler):
    """Loads the customization filter popover and purchase window in the
    lobby scope of the SF_LOBBY app when their view aliases are requested."""

    def __init__(self):
        # Both views are loaded via loadViewByCtxEvent so the triggering
        # event's context reaches the view.
        listeners = ((VIEW_ALIAS.CUSTOMIZATION_FILTER_POPOVER, self.loadViewByCtxEvent), (VIEW_ALIAS.CUSTOMIZATION_PURCHASE_WINDOW, self.loadViewByCtxEvent))
        super(CustomizationPackageBusinessHandler, self).__init__(listeners, APP_NAME_SPACE.SF_LOBBY, EVENT_BUS_SCOPE.LOBBY)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\lobby\customization\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:49:42 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
e778d43ec5600b5a700508ad1a72209a6c9747e1 | 6e635c0ad243c87bea7e2b9243a0a2414b0bce34 | /apps/authentication/migrations/0028_auto_20180508_1149.py | 7e2a59dbd4891bdb01e0a58e4947800eabbcced0 | [] | no_license | devmaster54/tixon | 43f83835658a53d8b49dbade2364f9b528181d27 | 778d9a8fb95c8fe3214a8423905553c71ed7234a | refs/heads/master | 2022-12-08T16:04:48.678987 | 2020-09-21T10:35:27 | 2020-09-21T10:35:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-08 11:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: re-declare
    BankAccount.account_number as a 30-character CharField."""

    dependencies = [
        ('authentication', '0027_bankaccount'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bankaccount',
            name='account_number',
            field=models.CharField(max_length=30, verbose_name='Bank Account Number'),
        ),
    ]
| [
"soft.expert32@gmail.com"
] | soft.expert32@gmail.com |
8ac992be682c4341076d777a260fae5d7c51e9bb | a744139b01a9c0f81f215225228ecf999dcf8c54 | /construct | 9c3a36cd13af1be8be4549848e8aa819b5fb2d93 | [] | no_license | Kazade/Platformation2 | 720ee81ae1620fed0b655063680aac244528bfdf | f45a23282f01b50198bba337fed0b0520ebe7d59 | refs/heads/master | 2016-09-05T22:22:40.263315 | 2012-08-08T18:04:39 | 2012-08-08T18:04:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | #!/usr/bin/env python
import sys
import os
import subprocess
def test(build_dir):
    """Configure the project with CMake, then run the test suite via ctest."""
    for command in (["cmake", ".."], ["ctest", "-v"]):
        subprocess.check_call(command, cwd=build_dir)
def build(build_dir):
    """Configure with CMake and compile with make using 4 parallel jobs."""
    for command in (["cmake", ".."], ["make", "-j4"]):
        subprocess.check_call(command, cwd=build_dir)
def run(build_dir):
    """Launch the built binary, named after the project directory.

    Raises IOError if the binary has not been built yet.
    """
    project_dir = os.path.dirname(os.path.abspath(__file__))
    binary_name = os.path.split(project_dir)[-1].lower()
    binary_path = os.path.join(build_dir, binary_name)
    if not os.path.exists(binary_path):
        raise IOError("No such executable: %s" % binary_path)
    # Run from the project root so relative data paths resolve.
    subprocess.check_call([binary_path], cwd=project_dir)
def package():
    # Stub: packaging step not implemented yet.
    pass
def design(path):
    """Open every .ui file under <path>/data/ui in the Glade designer."""
    ui_dir = os.path.join(path, "data", "ui")
    for filename in os.listdir(ui_dir):
        if filename.endswith(".ui"):
            subprocess.check_call(["glade", os.path.join(ui_dir, filename)])
def main():
    """Entry point: ensure the build directory exists, then dispatch on
    the first recognized verb found in sys.argv."""
    project_dir = os.path.dirname(os.path.abspath(__file__))
    build_dir = os.path.join(project_dir, ".build")
    if not os.path.exists(build_dir):
        os.mkdir(build_dir)
    # First matching verb wins: "build" takes precedence over "test", etc.
    dispatch = (
        ("build", lambda: build(build_dir)),
        ("test", lambda: (build(build_dir), test(build_dir))),
        ("run", lambda: run(build_dir)),
        ("design", lambda: design(project_dir)),
    )
    for verb, action in dispatch:
        if verb in sys.argv:
            action()
            break
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| [
"kazade@gmail.com"
] | kazade@gmail.com | |
ef47288aa706a958f4aa96dc746f97807517cd23 | 3ac0923505e1e03a07742355edec43f23ead82b7 | /Daily/PY/Leetcode148-排序链表.py | 1f913ce6d808ffc372248ce13886fbd6d8c9940c | [] | no_license | lock19960613/SCL | 5055c940d2529eef981a29698c7ea04212a8b9de | 3ea28fd8f5d5233411341283c85667e4b9fc64d5 | refs/heads/main | 2023-08-03T04:36:33.555296 | 2021-09-11T06:48:49 | 2021-09-11T06:48:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | from typing import List
class ListNode:
    """Node of a singly linked list (LeetCode scaffolding)."""

    def __init__(self, val=0, next=None):
        self.val = val  # node payload
        self.next = next  # successor node, or None at the tail
class Solution:
    def sortList(self, head: ListNode) -> ListNode:
        """Sort a singly linked list in ascending order (LeetCode 148).

        Top-down merge sort over half-open node ranges [start, stop):
        O(n log n) time, O(log n) recursion depth.
        """

        def merge_two(left: ListNode, right: ListNode) -> ListNode:
            # Splice two already-sorted lists behind a dummy head node.
            anchor = ListNode(0)
            cursor = anchor
            while left and right:
                if left.val <= right.val:
                    cursor.next, left = left, left.next
                else:
                    cursor.next, right = right, right.next
                cursor = cursor.next
            # At most one list still has nodes; attach the remainder.
            cursor.next = left if left else right
            return anchor.next

        def sort_range(start: ListNode, stop: ListNode) -> ListNode:
            if not start:
                return start
            if start.next is stop:
                # Single-node range: detach it and stop recursing.
                start.next = None
                return start
            # Locate the middle with slow/fast pointers over [start, stop).
            slow = fast = start
            while fast is not stop:
                slow = slow.next
                fast = fast.next
                if fast is not stop:
                    fast = fast.next
            return merge_two(sort_range(start, slow), sort_range(slow, stop))

        return sort_range(head, None)
| [
"597494370@qq.com"
] | 597494370@qq.com |
cb1e4850ceac7f39220c7715a4f73b64205e0678 | 727e50c524c229bc7736a757fbc51cc5939b7e10 | /peering/migrations/0066_auto_20201212_2301.py | 4e4b38d466bb48f26c6c81dd75532b740942fc86 | [
"Apache-2.0"
] | permissive | netravnen/peering-manager | 71fbe1801fe6e063ac1b4375cdb9fe3c8c3feee5 | c2a5149b3cb197291e0c9c10040738ce5fb29f02 | refs/heads/main | 2023-08-17T02:56:43.799975 | 2023-07-04T18:23:15 | 2023-07-04T18:23:15 | 149,284,135 | 0 | 0 | Apache-2.0 | 2023-09-11T08:18:27 | 2018-09-18T12:24:28 | Python | UTF-8 | Python | false | false | 1,255 | py | # Generated by Django 3.1.4 on 2020-12-12 22:01
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: drop the AS's cached
    potential-sessions field and the raw peeringdb_id column, replacing
    the latter with nullable FKs into the peeringdb app (SET_NULL so
    exchanges survive PeeringDB record deletion)."""

    dependencies = [
        ("peeringdb", "0014_auto_20201208_1856"),
        ("peering", "0065_auto_20201025_2137"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="autonomoussystem",
            name="potential_internet_exchange_peering_sessions",
        ),
        migrations.RemoveField(
            model_name="internetexchange",
            name="peeringdb_id",
        ),
        migrations.AddField(
            model_name="internetexchange",
            name="peeringdb_ix",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="peeringdb.internetexchange",
            ),
        ),
        migrations.AddField(
            model_name="internetexchange",
            name="peeringdb_netixlan",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="peeringdb.networkixlan",
            ),
        ),
    ]
| [
"guillaume@mazoyer.eu"
] | guillaume@mazoyer.eu |
ec7ab34529ebb34beec89f1b1695661f31866fd7 | 357c69a0441089b1cf5b62a0e21825488f02b4f9 | /cart/migrations/0002_auto_20181203_1255.py | fc01db8ae79dff6fecd12ae3b04cdd23aaaea4d4 | [] | no_license | OBAA/oneoverthree_subdomain | 3e40d5ae6a59bf4b97afb4d475d6fa9c736ee170 | 57b42058cb7c49b3b37f5aa1e444dc4650da9188 | refs/heads/master | 2022-12-11T16:13:02.152275 | 2019-03-26T18:53:28 | 2019-03-26T18:53:28 | 157,768,081 | 1 | 0 | null | 2022-11-22T02:56:19 | 2018-11-15T20:23:58 | HTML | UTF-8 | Python | false | false | 440 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-12-03 11:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: allow CouponCode.amount to be NULL."""

    dependencies = [
        ('cart', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='couponcode',
            name='amount',
            field=models.IntegerField(null=True),
        ),
    ]
| [
"agbanabolu@gmail.com"
] | agbanabolu@gmail.com |
03095ea85a3f9b4f737f8a87456734d2fe174563 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/parties/StretchingArrow.py | 395a48fcbcaeeaf4a7a62dd12711c78284c15b87 | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 5,025 | py | #===============================================================================
# Contact: Carlos Pineda (Schell Games)
# Created: October 2009
#
# Purpose: Arrow widget that stretches from one point to another.
# Used in PartyCogActivity
#===============================================================================
import math
from direct.gui.DirectGui import DirectFrame
from pandac.PandaModules import Point3
class StretchingArrow(DirectFrame):
    """
    Arrow widget that stretches its body from one point to another,
    optionally animating the draw from tail to tip.

    NOTE(review): relies on the Panda3D builtins loader, globalClock,
    render and directNotify being available -- confirm the client has
    initialized them before this widget is constructed.
    """
    notify = directNotify.newCategory("StretchingArrow")
    # Status codes returned by draw()
    arrowMoving = 0
    arrowBegin = 1
    arrowComplete = 2
    body = None
    head = None

    def __init__(self, parent, useColor="blue", autoload=True):
        """Create the arrow, optionally loading its geometry immediately.

        The widget starts stashed (hidden); draw() unstashes it.
        """
        DirectFrame.__init__(self, parent)
        self.useColor = useColor
        self.endOffset = 1.5     # gap kept between the arrow tip and toPoint
        self.startOffset = 0.0   # gap kept between the arrow tail and fromPoint
        self.shrinkRange = 7.0   # below this length the arrow scales down
        self.ratioDrawn = 0.0    # fraction of the arrow currently drawn
        if autoload:
            self.load()
        self.stash()

    def load(self):
        """Load the arrow model and reparent its body/head pieces to us."""
        model = loader.loadModel("phase_13/models/parties/stretchingArrow")
        model.setP(-90)
        self.body = model.find("**/arrowBody_" + self.useColor)
        self.body.wrtReparentTo(self)
        self.head = model.find("**/arrowHead_" + self.useColor)
        self.head.wrtReparentTo(self)
        model.removeNode()

    def unload(self):
        """Release the loaded arrow geometry."""
        if self.body is not None:
            self.body.removeNode()
            self.body = None
        if self.head is not None:
            # BUGFIX: this branch previously removed/cleared self.body again
            # (already None at this point), leaking the head node and
            # risking an AttributeError.
            self.head.removeNode()
            self.head = None

    def reset(self):
        """Restart the draw animation from the beginning."""
        self.ratioDrawn = 0.0

    def draw(self, fromPoint, toPoint, rotation=0, animate=True):
        """Draw the arrow from fromPoint to toPoint, head at the toPoint end.

        When animate is True the arrow grows from start to finish, fades
        out over a short down time and restarts.  Returns
        StretchingArrow.arrowBegin when a draw cycle starts,
        StretchingArrow.arrowComplete when one finishes, and
        StretchingArrow.arrowMoving otherwise (None if assets are missing).

        The rotation parameter is ignored: the heading is recomputed from
        the two points below.
        """
        arrowlength = 2.72  # length of the unscaled arrow-body model
        if self.body is None or self.head is None:
            assert(self.notify.debug("draw(): Assets not loaded, therefore cannot draw"))
            return
        actualDifference = fromPoint - toPoint
        actualLength = actualDifference.length()
        oldRatio = self.ratioDrawn
        # Animation tuning constants; kept local so they can be tweaked
        # at runtime (see original TODO about hoisting to the constructor).
        drawSpeed = 1.6
        drawSpeedMin = 0.6
        downTime = 1.0
        fadeOutTime = 0.5
        # calculate how fast to draw the arrow
        drawRate = max(drawSpeedMin, drawSpeed * actualLength / arrowlength)
        # increment how much of the arrow is drawn
        self.ratioDrawn += globalClock.getDt() / drawRate
        # set basic return
        result = StretchingArrow.arrowMoving
        # if arrow hits its end, set it back to the downTime
        if self.ratioDrawn >= 1.0:
            result = StretchingArrow.arrowComplete
            self.ratioDrawn = -downTime
        # arrow is starting out when the drawn ratio crosses zero
        # (cmp() sign comparison -- this module is Python 2 code)
        if cmp(oldRatio,0) != cmp(self.ratioDrawn, 0) and result != StretchingArrow.arrowComplete:
            result = StretchingArrow.arrowBegin
        if not animate:
            self.ratioDrawn = 1.0
        normal = Point3(actualDifference.getX(), actualDifference.getY(), actualDifference.getZ())
        normal.normalize()
        # Heading recomputed from the two points; the parameter is unused.
        rotation = math.degrees(math.atan2(actualDifference.getY(), actualDifference.getX()))
        # Pull the endpoints in by the configured offsets.
        endPoint = toPoint + (normal * self.endOffset)
        startPoint = fromPoint - (normal * self.startOffset)
        newlength = (endPoint - startPoint).length() / arrowlength
        # Shrink the whole arrow when the two points are close together.
        newScale = min(actualLength/self.shrinkRange, 1.0)
        self.head.setScale(newScale)
        ratio = max(0.0, self.ratioDrawn)
        if ratio == 0.0:
            ratio = 1.0
        newlength *= ratio
        # Hide the arrow entirely when there is no room to draw it.
        if actualLength < self.endOffset:
            self.stash()
        else:
            self.unstash()
        self.body.setPos(startPoint)
        self.body.setH(rotation)
        self.head.setH(rotation -90)
        self.body.setScale(newlength-(0.013*newScale), newScale, newScale)
        # Position the head along the start->end segment per the drawn ratio.
        vec = (startPoint-endPoint)
        vec *= ratio
        self.head.setPos(startPoint - vec)
        self.head.setZ(render, self.body.getZ(render) + 0.001)
        # fade out while the ratio is negative (the down time)
        if self.ratioDrawn < 0.0:
            self.setAlphaScale((abs(self.ratioDrawn) - (downTime - fadeOutTime)))
        else:
            self.setAlphaScale(1.0)
        # BUGFIX: result was computed (and documented as the return value)
        # but never returned.
        return result
"66761962+satire6@users.noreply.github.com"
] | 66761962+satire6@users.noreply.github.com |
e40a6b0ee9fd1e1182e44ca2841e48e14fdad6e7 | f2889a13368b59d8b82f7def1a31a6277b6518b7 | /532.py | 90b9285049a1d948b8f349ad01b68e170b6c8e1b | [] | no_license | htl1126/leetcode | dacde03de5c9c967e527c4c3b29a4547154e11b3 | c33559dc5e0bf6879bb3462ab65a9446a66d19f6 | refs/heads/master | 2023-09-01T14:57:57.302544 | 2023-08-25T15:50:56 | 2023-08-25T15:50:56 | 29,514,867 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # Ref: https://leetcode.com/problems/k-diff-pairs-in-an-array/discuss/100135/Easy-Understood-Python-Solution
import collections
class Solution(object):
    def findPairs(self, nums, k):
        """Count unique pairs (a, b) in nums with |a - b| == k.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        freq = collections.Counter(nums)
        total = 0
        for value in freq:
            if k == 0:
                # A zero-diff pair needs the same value to occur twice.
                if freq[value] > 1:
                    total += 1
            elif k > 0 and value + k in freq:
                # Count each distinct lower value once per k-diff partner.
                total += 1
        return total
if __name__ == "__main__":
    # Smoke run; prints 2 for the pairs (1, 3) and (3, 5).
    # Fixed: print() call form works on both Python 2 and 3; the bare
    # Python-2 print statement made the file a SyntaxError under Python 3.
    sol = Solution()
    print(sol.findPairs([3, 1, 4, 1, 5], 2))
| [
"tlhuang@tlhuang.net"
] | tlhuang@tlhuang.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.