blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7cef6d955fd4d1a75af3860ab27581432f225c11 | 7d096568677660790479d87c22b47aae838ef96b | /stubs-legacy/System/ComponentModel/__init___parts/DisplayNameAttribute.py | 6564ffaab714f51b11aa1f4215aabc8dcb0b85d1 | [
"MIT"
] | permissive | NISystemsEngineering/rfmx-pythonnet | 30adbdd5660b0d755957f35b68a4c2f60065800c | cd4f90a88a37ed043df880972cb55dfe18883bb7 | refs/heads/master | 2023-02-04T00:39:41.107043 | 2023-02-01T21:58:50 | 2023-02-01T21:58:50 | 191,603,578 | 7 | 5 | MIT | 2023-02-01T21:58:52 | 2019-06-12T16:02:32 | Python | UTF-8 | Python | false | false | 2,076 | py | class DisplayNameAttribute(Attribute,_Attribute):
"""
Specifies the display name for a property,event,or public void method which takes no arguments.
DisplayNameAttribute()
DisplayNameAttribute(displayName: str)
"""
def Equals(self,obj):
"""
Equals(self: DisplayNameAttribute,obj: object) -> bool
Determines whether two System.ComponentModel.DisplayNameAttribute instances are equal.
obj: The System.ComponentModel.DisplayNameAttribute to test the value equality of.
Returns: true if the value of the given object is equal to that of the current object; otherwise,false.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: DisplayNameAttribute) -> int
Returns the hash code for this instance.
Returns: A hash code for the current System.ComponentModel.DisplayNameAttribute.
"""
pass
def IsDefaultAttribute(self):
"""
IsDefaultAttribute(self: DisplayNameAttribute) -> bool
Determines if this attribute is the default.
Returns: true if the attribute is the default value for this attribute class; otherwise,false.
"""
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,displayName=None):
"""
__new__(cls: type)
__new__(cls: type,displayName: str)
"""
pass
def __ne__(self,*args):
pass
DisplayName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the display name for a property,event,or public void method that takes no arguments stored in this attribute.
Get: DisplayName(self: DisplayNameAttribute) -> str
"""
DisplayNameValue=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the display name.
"""
Default=None
| [
"sean.moore@ni.com"
] | sean.moore@ni.com |
c79f144acca2207d3e4d4d90e89aa916295f9227 | b53a84f6b0463cd8459e282b77cf3edc61735f61 | /jaraco/mongodb/service.py | 8c14e6c5be8de1b98928e8c672e98fd23982105d | [
"MIT"
] | permissive | jaraco/jaraco.mongodb | 6619f8019d474c7d419346b3a90faa66b6f43e81 | a852399c506c5d2ed71950ecd9b5f469ff4a4040 | refs/heads/main | 2023-08-31T16:42:20.542742 | 2023-08-31T13:26:45 | 2023-08-31T13:26:45 | 45,183,461 | 5 | 1 | MIT | 2023-09-01T20:06:43 | 2015-10-29T13:04:03 | Python | UTF-8 | Python | false | false | 7,994 | py | import os
import sys
import tempfile
import subprocess
import glob
import collections
import importlib
import shutil
import functools
import logging
import datetime
import pathlib
import contextlib
from typing import Dict, Any
import portend
from jaraco.services import paths
from jaraco import services
from tempora import timing
from . import manage
from . import cli
from . import install
log = logging.getLogger(__name__)
class MongoDBFinder(paths.PathFinder):
    """Locate a mongod installation among well-known filesystem locations."""

    # Versioned Windows install directories (e.g. Server/4.0/bin).
    windows_installed = glob.glob('/Program Files/MongoDB/Server/???/bin')
    windows_paths = [
        # symlink Server/current to Server/X.X
        '/Program Files/MongoDB/Server/current/bin',
        # symlink MongoDB to mongodb-win32-x86_64-2008plus-X.X.X-rcX
        '/Program Files/MongoDB/bin',
    ] + list(reversed(windows_installed))
    heuristic_paths = [
        # on the path
        '',
        # 10gen Debian package
        '/usr/bin',
        # custom install in /opt
        '/opt/mongodb/bin',
    ] + windows_paths
    # Allow the environment to stipulate where mongodb must be found.
    env_paths = (
        [os.path.join(os.environ['MONGODB_HOME'], 'bin')]
        if 'MONGODB_HOME' in os.environ
        else []
    )
    candidate_paths = env_paths or heuristic_paths
    exe = 'mongod'
    args = ['--version']

    @classmethod
    def find_binary(cls):
        """Return the full path to the mongod executable."""
        return os.path.join(cls.find_root(), cls.exe)

    @classmethod
    @contextlib.contextmanager
    def ensure(cls):
        """Yield a mongod root, installing a temporary copy if none is found."""
        try:
            yield cls.find_root()
        except RuntimeError:
            # Nothing installed: fetch a copy into a throwaway directory and
            # expose it through candidate_paths only for the duration.
            with tempfile.TemporaryDirectory() as scratch:
                bin_root = install.install(target=pathlib.Path(scratch)).joinpath('bin')
                cls.candidate_paths.append(bin_root)
                yield bin_root
                cls.candidate_paths.remove(bin_root)
class MongoDBService(MongoDBFinder, services.Subprocess, services.Service):
    """Manage a system-wide mongod process on the conventional port."""

    port = 27017
    process_kwargs: Dict[str, Any] = {}
    """
    keyword arguments to Popen to control the process creation
    """

    @services.Subprocess.PortFree()
    def start(self):
        super(MongoDBService, self).start()
        # Launch the daemon against the interpreter-prefix data directory.
        data_path = os.path.join(sys.prefix, 'var', 'lib', 'mongodb')
        command = [
            self.find_binary(),
            '--dbpath=' + data_path,
        ]
        self.process = subprocess.Popen(command, **self.process_kwargs)
        # Block until mongod announces readiness on its output stream.
        self.wait_for_pattern(r'waiting for connections on port (?P<port>\d+)')
        log.info('%s listening on %s', self, self.port)
class MongoDBInstance(MongoDBFinder, services.Subprocess, services.Service):
    """A throwaway mongod on an ephemeral port with a temporary data dir."""

    process_kwargs: Dict[str, Any] = {}
    """
    keyword arguments to Popen to control the process creation
    """

    def merge_mongod_args(self, add_args):
        # Pull --port out of the pass-through args; everything else is
        # handed to mongod verbatim.
        self.port, add_args[:] = cli.extract_param('port', add_args, type=int)
        self.mongod_args = add_args

    def start(self):
        super(MongoDBInstance, self).start()
        if not hasattr(self, 'port') or not self.port:
            self.port = portend.find_available_local_port()
        self.data_dir = tempfile.mkdtemp()
        command = [
            self.find_binary(),
            '--dbpath',
            self.data_dir,
            '--port',
            str(self.port),
        ]
        command.extend(self.mongod_args)
        if hasattr(self, 'bind_ip') and '--bind_ip' not in command:
            command.extend(['--bind_ip', self.bind_ip])
        self.process = subprocess.Popen(command, **self.process_kwargs)
        # Wait until the port is actually accepting connections.
        portend.occupied('localhost', self.port, timeout=10)
        log.info(f'{self} listening on {self.port}')

    def get_connection(self):
        # pymongo is imported lazily so the dependency stays optional.
        pymongo = importlib.import_module('pymongo')
        return pymongo.MongoClient('localhost', self.port)

    def purge_all_databases(self):
        manage.purge_all_databases(self.get_connection())

    def get_connect_hosts(self):
        return [f'localhost:{self.port}']

    def get_uri(self):
        return 'mongodb://' + ','.join(self.get_connect_hosts())

    def stop(self):
        super(MongoDBInstance, self).stop()
        # Remove the scratch data directory along with the process.
        shutil.rmtree(self.data_dir)
        del self.data_dir
class ExtantInstance:
    """Adapter for a MongoDB instance that is already running at *uri*."""

    def __init__(self, uri):
        self.uri = uri

    def get_connection(self):
        # Lazy import keeps pymongo an optional dependency.
        mongo = importlib.import_module('pymongo')
        return mongo.MongoClient(self.uri)

    def get_uri(self):
        return self.uri
class MongoDBReplicaSet(MongoDBFinder, services.Service):
    """Spin up a local three-member mongod replica set for testing."""

    replica_set_name = 'test'
    # Extra parameters for every member; small oplog keeps disk usage down.
    mongod_parameters = (
        '--oplogSize',
        '10',
    )

    def start(self):
        super(MongoDBReplicaSet, self).start()
        self.data_root = tempfile.mkdtemp()
        # Launch three members, each with its own data dir and port.
        self.instances = list(map(self.start_instance, range(3)))
        # initialize the replica set
        self.instances[0].connect().admin.command(
            'replSetInitiate', self.build_config()
        )
        # wait until the replica set is initialized
        get_repl_set_status = functools.partial(
            self.instances[0].connect().admin.command, 'replSetGetStatus', 1
        )
        errors = importlib.import_module('pymongo.errors')
        log.info('Waiting for replica set to initialize')
        watch = timing.Stopwatch()
        # Poll for up to five minutes until member 0 reports myState == 1
        # (PRIMARY). The while/else fires only if the deadline expires;
        # a successful `break` skips it.
        while watch.elapsed < datetime.timedelta(minutes=5):
            try:
                res = get_repl_set_status()
                if res.get('myState') != 1:
                    continue
            except errors.OperationFailure:
                # Status command can fail while the set is still forming.
                continue
            break
        else:
            raise RuntimeError("timeout waiting for replica set to start")

    def start_instance(self, number):
        """Start replica member *number* and return its InstanceInfo."""
        port = portend.find_available_local_port()
        data_dir = os.path.join(self.data_root, repr(number))
        os.mkdir(data_dir)
        cmd = [
            self.find_binary(),
            '--dbpath',
            data_dir,
            '--port',
            str(port),
            '--replSet',
            self.replica_set_name,
        ] + list(self.mongod_parameters)
        log_file = self.get_log(number)
        process = subprocess.Popen(cmd, stdout=log_file)
        # Generous timeout: replica members can be slow to come up.
        portend.occupied('localhost', port, timeout=50)
        log.info(f'{self}:{number} listening on {port}')
        return InstanceInfo(data_dir, port, process, log_file)

    def get_log(self, number):
        """Open (append mode) the per-member log file under data_root."""
        log_filename = os.path.join(self.data_root, f'r{number}.log')
        log_file = open(log_filename, 'a')
        return log_file

    def is_running(self):
        # Running means started (instances exist) and no member has exited.
        return hasattr(self, 'instances') and all(
            instance.process.returncode is None for instance in self.instances
        )

    def stop(self):
        super(MongoDBReplicaSet, self).stop()
        for instance in self.instances:
            if instance.process.returncode is None:
                instance.process.terminate()
                instance.process.wait()
            instance.log_file.close()
        del self.instances
        shutil.rmtree(self.data_root)

    def build_config(self):
        """Build the replSetInitiate config document from the members."""
        return dict(
            _id=self.replica_set_name,
            members=[
                dict(
                    _id=number,
                    host=f'localhost:{instance.port}',
                )
                for number, instance in enumerate(self.instances)
            ],
        )

    def get_connect_hosts(self):
        return [f'localhost:{instance.port}' for instance in self.instances]

    def get_uri(self):
        return 'mongodb://' + ','.join(self.get_connect_hosts())

    def get_connection(self):
        # Lazy import keeps pymongo an optional dependency.
        pymongo = importlib.import_module('pymongo')
        return pymongo.MongoClient(self.get_uri())
# Lightweight record describing one running replica-set member.
InstanceInfoBase = collections.namedtuple(
    'InstanceInfoBase',
    'path port process log_file',
)
class InstanceInfo(InstanceInfoBase):
    """InstanceInfoBase plus the ability to open a client connection."""

    def connect(self):
        pymongo = __import__('pymongo')
        # Prefer the primary but fall back to a secondary if needed.
        preference = pymongo.ReadPreference.PRIMARY_PREFERRED
        return pymongo.MongoClient(
            f'localhost:{self.port}', read_preference=preference
        )
| [
"jaraco@jaraco.com"
] | jaraco@jaraco.com |
f66ddbefcac3491e18bd0349b504dd7153773ab6 | 286b6dc56323f982092ffafbfac8a32dbbaeb7ef | /training_assignments/Day01_Assignments/sample_script.py | 65a667fdae2ff91bbab32ae286a3098f284439d4 | [] | no_license | learndevops19/pythonTraining-CalsoftInc | ccee0d90aadc00bfdb17f9578620f6bf92f80a4c | c5f61516b835339b394876edd1c6f62e7cc6f0c3 | refs/heads/master | 2021-02-05T04:27:17.590913 | 2019-11-20T17:27:06 | 2019-11-20T17:27:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | #!usr/bin/env python
"""
This is module level docstring
"""
# import statement to import external lib in the current module
import sample_lib
# Function definition with argument `limit`
def get_all_prime(limit):
    """Scan the integers upward until *limit* primes have been seen.

    Primality testing is delegated to ``sample_lib.is_prime``; the primes
    are only counted (printing is currently disabled), nothing is returned.

    Args:
        limit: how many primes to consume before stopping.

    Returns:
        None.
    """
    candidate = 2  # local counter over prime candidates
    remaining = limit
    # Keep scanning until the requested number of primes has been found.
    while remaining > 0:
        if sample_lib.is_prime(candidate):
            remaining -= 1
            # print(candidate)  # output disabled in the original
        candidate += 1
# Function scope finishes when the indent is over
# Entry point for the execution of the script
# Entry point for the execution of the script
if __name__ == "__main__":  # Checking that the script name is __main__
    # Getting input from stdin
    count = int(input("Enter count of prime numbers:"))
    # function call
    get_all_prime(count)
| [
"rajpratik71@gmail.com"
] | rajpratik71@gmail.com |
497387657e44a2b62a1cecc699a680722893a502 | 03e5f1fc05a65d6591964341bb90ca20ee9ae687 | /path_finder.py | 81680d22f3823108eb3319b80d4568c78f57d269 | [] | no_license | orenovadia/word_chains | 391ba7c603d20880830ef01fc0fc42b87dee9e1e | 2253c7f808b2d0978581e437232853f8d0eb58bb | refs/heads/master | 2020-06-22T09:38:25.460906 | 2019-07-19T03:34:18 | 2019-07-19T03:34:18 | 197,692,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | from typing import Optional, Dict, List
from oneedgraph import OneEdGraph
class PathFinder(object):
    """Breadth-first search over a graph of equal-length words."""

    def __init__(self, graph: "OneEdGraph") -> None:
        super().__init__()
        self._graph = graph

    def shortest_path(self, source, destination) -> Optional[List[str]]:
        """Return one shortest word chain from *source* to *destination*.

        Words of different lengths can never be connected; that case (and
        any unreachable destination) yields None.
        """
        if len(source) != len(destination):
            return None

        # Maps each discovered word to its BFS predecessor.
        predecessor: Dict[str, Optional[str]] = {source: None}
        frontier = {source}
        visited = set()

        # Expand level by level until the destination enters the frontier
        # or the reachable component is exhausted.
        while frontier and destination not in frontier:
            visited.update(frontier)
            next_frontier = set()
            for word in frontier:
                for neighbor in self._graph.adjacent(word):
                    if neighbor in visited:
                        continue
                    predecessor[neighbor] = word
                    next_frontier.add(neighbor)
            frontier = next_frontier

        if destination not in predecessor:
            return None

        # Walk the predecessor chain back from the destination.
        chain = []
        node = destination
        while node:
            chain.append(node)
            node = predecessor[node]
        chain.reverse()
        return chain
| [
"orenovad@gmail.com"
] | orenovad@gmail.com |
f3e0686647a601213f779653cb8e50d2cd76685a | 7a2ad6c274add0f88cab8f6f89de551ff86597c9 | /AOC2018/days/day4/tests/test_Day4Puzzle2.py | 15151e77987d77062fa679b325f6d1a71dc2718b | [] | no_license | Akivashi/AOC | 8b66cecc9c6cf32c318d5b605f7f9ee952aad373 | cbd3a766db1a4b6560c4adcf978ec79b30707032 | refs/heads/master | 2023-01-29T17:57:39.704089 | 2020-12-08T18:41:15 | 2020-12-09T18:55:10 | 318,249,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from unittest import TestCase
from Day4Puzzle2 import Day4Puzzle2
class Day4Puzzle2Test(TestCase):
def setUp(self):
self.puzzle = Day4Puzzle2()
def test_puzzle(self):
self.assertEqual(self.puzzle.solution("tests/Day4_test_input1.txt"), 4455)
| [
"rene@infi.nl"
] | rene@infi.nl |
46a9ffa4ccd9fe2bae9d06ccad77a6be1c64fab1 | c867b7d1b26547f76605676725367eee89a9c6bb | /appimagebuilder/context.py | ad0994d1daa39762a43b8aead53128cb2ca43e72 | [
"MIT"
] | permissive | jclab-joseph/appimage-builder | a6ceae075eb0e6d2e61df9e1fe38371606a0c4e7 | e757003a6c60ea72721c866758befa2dc6a50058 | refs/heads/master | 2023-08-24T06:20:51.154749 | 2021-09-21T02:15:09 | 2021-09-21T02:15:09 | 412,281,434 | 0 | 0 | MIT | 2021-10-01T08:48:38 | 2021-10-01T01:10:51 | null | UTF-8 | Python | false | false | 2,438 | py | # Copyright 2021 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import pathlib
class AppInfo:
    """Descriptive metadata for the application being bundled."""

    id: str
    name: str
    icon: str
    version: str
    exec: str
    exec_args: str

    def __init__(
        self,
        id: str = None,
        name: str = None,
        icon: str = None,
        version: str = None,
        exec: str = None,
        exec_args: str = None,
    ):
        # Parameter names mirror the attribute names one-to-one.
        self.id, self.name, self.icon = id, name, icon
        self.version, self.exec, self.exec_args = version, exec, exec_args
class BundleInfo:
    """Everything required to produce the final AppImage bundle."""

    app_dir: pathlib.Path
    app_info: "AppInfo"
    # update string to be attached into the bundle
    update_string: str
    # appimage runtime arch
    runtime_arch: str
    # sign key to be used
    sign_key: str
    # resulting file name
    file_name: str

    def __init__(
        self,
        app_dir: pathlib.Path = None,
        app_info: "AppInfo" = None,
        update_string: str = None,
        runtime_arch: str = None,
        sign_key: str = None,
        file_name: str = None,
    ):
        self.app_dir = app_dir
        # Fall back to an empty AppInfo so attribute access is always safe.
        self.app_info = app_info or AppInfo()
        self.update_string = update_string
        self.runtime_arch = runtime_arch
        self.sign_key = sign_key
        self.file_name = file_name
class Context:
    """Shared state handed to every build command."""

    app_info: "AppInfo"
    bundle_info: "BundleInfo"
    app_dir: pathlib.Path
    cache_dir: pathlib.Path
    # Used by commands to register their actions
    record: dict

    def __init__(
        self, app_info, bundle_info, app_dir: pathlib.Path, cache_dir: pathlib.Path
    ):
        self.app_info = app_info
        self.bundle_info = bundle_info
        # Normalize both directories so commands can rely on absolute paths.
        self.app_dir = app_dir.absolute()
        self.cache_dir = cache_dir.absolute()
        self.record = {}
| [
"contact@azubieta.net"
] | contact@azubieta.net |
9ed19f3d4dcddbbdb88440d766807fbdb9b7ba36 | aab4acf5f144985ef0ba69fa122ecdb0973a61e3 | /python_experiments/algorithm_vis/pscan_algo_vis.py | 2a16fd7c491dbf95d0b2faacd32a4a9f8c8df1ff | [
"MIT"
] | permissive | zzerain/ppSCAN | 1f2e07fed7b7bb6ae40a7f5f6b7721d92f74eb0c | 691b39309da1c6b5df46b264b5a300a35d644f70 | refs/heads/master | 2023-03-19T06:41:15.712172 | 2020-06-04T03:24:55 | 2020-06-04T03:24:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,070 | py | import networkx as nx
import matplotlib.pyplot as plt
from pscan_algo_naive import to_csr_graph
from itertools import chain
def vis_input(graph, min_cn, similar_deg_lst, min_pts, graph_name):
    # Python 2 code (print statements, xrange). Renders the graph with
    # pSCAN edge/vertex state overlaid and saves it as <graph_name>.png.
    # NOTE(review): relies on the module-level globals deg_lst and dst_v_lst
    # set in the __main__ block, not on the graph argument — confirm intended.
    """
    :type graph: nx.Graph
    """
    # draw background graph
    pos = nx.circular_layout(graph)
    nx.draw_networkx_nodes(graph, with_labels=True, pos=pos, font_size=12, node_size=380, alpha=1.0, width=4,
                           node_color='black')
    nx.draw_networkx(graph, with_labels=True, pos=pos, font_size=12, node_size=350, alpha=1.0, width=4,
                     edge_color='grey', node_color='white')
    # parse edge list and its property
    # Expand the CSR degree list into one source vertex per outgoing edge.
    src_u_lst = list(chain.from_iterable(
        map(lambda pair: [pair[0] for _ in xrange(pair[1])], zip(range(len(deg_lst)), deg_lst))))
    # Keep each undirected edge once (u < v), paired with its min_cn state.
    edge_with_property_lst = filter(lambda pair: pair[0][0] < pair[0][1], zip(zip(src_u_lst, dst_v_lst), min_cn))
    print edge_with_property_lst
    # nx.draw_networkx_edge_labels(graph, pos=pos, edge_labels=dict(edge_with_property_lst))
    # -2 marks similar edges (blue), -1 marks dissimilar edges (red).
    blue_edges = map(lambda pair: pair[0], filter(lambda pair: pair[1] == -2, edge_with_property_lst))
    nx.draw_networkx_edges(graph, pos=pos, edgelist=blue_edges, edge_color='b', width=4)
    red_edges = map(lambda pair: pair[0], filter(lambda pair: pair[1] == -1, edge_with_property_lst))
    nx.draw_networkx_edges(graph, pos=pos, edgelist=red_edges, edge_color='r', width=4)
    # alpha_lst = map(lambda similar_deg: min(float(similar_deg) / min_pts, 1), similar_deg_lst)
    # Vertices reaching min_pts similar neighbors are drawn opaque (cores).
    alpha_lst = map(lambda similar_deg: 0 if similar_deg< min_pts else 1, similar_deg_lst)
    print alpha_lst
    for idx, alpha in enumerate(alpha_lst):
        nx.draw_networkx_nodes(graph, with_labels=True, pos=pos, font_size=12, node_size=350, alpha=alpha,
                               nodelist=[idx], node_color='r')
    plt.axis('off')
    plt.savefig('./' + graph_name + '.png', bbox_inches='tight', pad_inches=0, transparent=True)
    plt.show()
def vis_input_only(graph):
    # Render the bare input graph (no algorithm state) and save it as
    # pure_demo_input_graph.png. Python 2 / networkx / matplotlib code.
    # draw background graph
    pos = nx.circular_layout(graph)
    nx.draw_networkx_nodes(graph, with_labels=True, pos=pos, font_size=12, node_size=380, alpha=1.0, width=4,
                           node_color='black')
    nx.draw_networkx(graph, with_labels=True, pos=pos, font_size=12, node_size=350, alpha=1.0, width=4,
                     edge_color='grey', node_color='white')
    plt.axis('off')
    plt.savefig('./' + 'pure_demo_input_graph' + '.png', bbox_inches='tight', pad_inches=0, transparent=True)
    plt.show()
if __name__ == '__main__':
    # Load the demo graph and visualize the successive pSCAN phases.
    graph = nx.read_edgelist('demo_input_graph.txt', nodetype=int)
    offset_lst, dst_v_lst, deg_lst = to_csr_graph(graph)
    print 'csr representation:\noffset_lst=', offset_lst, '\ndst_v_lst=', dst_v_lst, '\ndeg_lst=', deg_lst, '\n'
    vis_input_only(graph)
    # demo input graph
    min_cn = [0 for _ in xrange(len(dst_v_lst))]
    similar_degree_lst = [0 for _ in xrange(len(deg_lst))]
    vis_input(graph, min_cn=min_cn, similar_deg_lst=similar_degree_lst, min_pts=3, graph_name='demo_input_graph')
    # after 1. pruning
    # Hard-coded per-edge states matching the demo graph's edge order.
    min_cn = [-1, 0, 3, 3, 3, 0, 3, 3, 0, 0, 3, 0, 0, 0, 3, 0, 3, 0, 3, 3, 4, 0, 3, 3, 0, 0, 3, 0, 0, 0, -2, -2, 0, 0]
    vis_input(graph, min_cn=min_cn, similar_deg_lst=similar_degree_lst, min_pts=3, graph_name='after_pruning_graph')
    # after 2.1 check core
    min_cn = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1,
              -1, -1, -1, -1, -2, -2, -2, -2]
    vis_input(graph, min_cn=min_cn, similar_deg_lst=similar_degree_lst, min_pts=3,
              graph_name='after_check_core_1st_bsp_graph')
    # after 2.2 check core
    similar_degree_lst = [1, 4, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0]
    min_cn = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1,
              -1, -1, -1, -1, -2, -2, -2, -2]
    vis_input(graph, min_cn=min_cn, similar_deg_lst=similar_degree_lst, min_pts=3,
              graph_name='after_check_core_2nd_bsp_graph')
| [
"yche@cse.ust.hk"
] | yche@cse.ust.hk |
01e0c980fc1f652ee966283c3a6a783c0446f6fa | 2b85d16098aaae7c9aa0cc0e670b0a67df658d78 | /app/user/admin.py | 2d5d9594b3a908835a95ad51aee272ab59f8fe70 | [] | no_license | fuadaghazada/fampact-backend | 2c9ac7ba4b3e7efd5278f75ee32f7484a7409bcb | 6f0f9e3b7e2e544a4fe3a9bfa2451712e1dd1307 | refs/heads/master | 2023-03-08T09:21:33.728770 | 2021-02-21T13:09:14 | 2021-02-21T13:09:14 | 340,579,319 | 1 | 0 | null | 2021-02-20T06:43:59 | 2021-02-20T06:23:59 | null | UTF-8 | Python | false | false | 1,575 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext_lazy as _
from scoring.models import Score
from .models import User, Family
class UserAdminInline(admin.StackedInline):
    # Inline editor so member users can be managed from the Family admin page
    # (referenced by FamilyAdmin.inlines below).
    model = User
    extra = 1
class UserAdmin(BaseUserAdmin):
    """Admin configuration for User, including the computed `score` column."""

    list_display = (
        'first_name',
        'last_name',
        'email',
        'username',
        'verified_at',
        'family',
        'role',
        'score',
        'd_o_b'
    )
    list_filter = ('is_superuser', 'role')
    # Field layout for the change form.
    fieldsets = (
        (None, {'fields': ('email', 'username', 'password')}),
        (_('Personal info'), {'fields': ('first_name', 'last_name', 'd_o_b')}),
        (_('Permissions'), {'fields': ('is_superuser', 'verified_at')}),
    )
    # Field layout for the "add user" form (password entered twice).
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2'),
        }),
    )
    search_fields = ('first_name', 'last_name', 'email')
    ordering = ('first_name', 'last_name', 'email',)
    filter_horizontal = ()

    def score(self, instance):
        # Computed list column: delegates to the scoring manager.
        return Score.objects.calculate_user_score(instance)
class FamilyAdmin(admin.ModelAdmin):
    """Admin configuration for Family with inline member editing."""

    list_display = ('name', 'created_at', 'updated_at', 'score')
    inlines = [
        UserAdminInline
    ]

    def get_queryset(self, request):
        # NOTE(review): replaces the default Family queryset with the public
        # leaderboard queryset (which carries a `score` attribute) — confirm
        # this is intentional for the admin list view.
        return Score.objects.public_leader_board_qs()

    def score(self, instance):
        # Reads the score annotated by public_leader_board_qs().
        return instance.score


admin.site.register(User, UserAdmin)
admin.site.register(Family, FamilyAdmin)
| [
"fuad.aghazada98@gmail.com"
] | fuad.aghazada98@gmail.com |
be585520b660cff0acce377f5f031333de1360bc | 148ac8d601369aaae6918cf0a55a4d4f5afb5e75 | /decision_tree.py | 9a755464423d1acfd379b30272aafd32ac13f513 | [] | no_license | MrVersatile007/ML-with-Rishi | a7800e27f5cbac9b68d526469beb380ed59bb029 | db76aa26ef5d349237d0fa1f0bdd677352dfb392 | refs/heads/main | 2023-06-02T15:53:00.902907 | 2021-06-27T15:15:01 | 2021-06-27T15:15:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 26 20:44:13 2021
@author: RISHBANS
"""
import pandas as pd
dataset = pd.read_csv("Apply_Job.csv")
X = dataset.iloc[:, 0:2].values
y = dataset.iloc[:, 2].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
from sklearn.tree import DecisionTreeClassifier
dt_c = DecisionTreeClassifier()
dt_c.fit(X_train, y_train)
pred_test = dt_c.predict(X_test)
from sklearn.metrics import accuracy_score
test_accuracy = accuracy_score(y_test, pred_test)
print(test_accuracy)
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import numpy as np
#Define Variables
clf = dt_c
h = 0.1
X_plot, z_plot = X_train, y_train
#Standard Template to draw graph
x_min, x_max = X_plot[:, 0].min() - 1, X_plot[:, 0].max() + 1
y_min, y_max = X_plot[:, 1].min() - 1, X_plot[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh
Z = clf.predict(np.array([xx.ravel(), yy.ravel()]).T)
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z,
alpha = 0.7, cmap = ListedColormap(('blue', 'red')))
for i, j in enumerate(np.unique(z_plot)):
plt.scatter(X_plot[z_plot == j, 0], X_plot[z_plot == j, 1],
c = ['blue', 'red'][i], cmap = ListedColormap(('blue', 'red')), label = j)
#X[:, 0], X[:, 1]
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title('Decision Tree')
plt.xlabel('Exp in Year')
plt.ylabel('Salary in Lakh')
plt.legend() | [
"rishibansal02@gmail.com"
] | rishibansal02@gmail.com |
43cb66f4ba8525d2f2ac814c63860c31289b6fec | 8d161515037cd42dcd6404a068620c11b2597ae8 | /LeetCode/__Contest__/Day6/n_unique_with_zero_sum.py | 8c46dd891297268ec0052581421805d84ade21e0 | [] | no_license | YiseBoge/CompetitiveProgramming | 433526d18a7bfe754f9e1c8d20b2b234d7b7568c | 26b720623bcc1afe054101a13ca37b65ff518ce1 | refs/heads/master | 2021-12-03T17:52:00.048476 | 2021-11-23T17:40:14 | 2021-11-23T17:40:14 | 225,392,780 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | class Solution:
def sumZero(self, n: int) -> list:
half = n // 2
result = []
for i in range(1, half + 1):
result.append(i)
result.append(-i)
if n % 2 != 0:
result.append(0)
return result
def solution(l1):
    # Thin wrapper kept for the shared judging harness.
    return Solution().sumZero(l1)


def main():
    inp1 = 10
    print(solution(inp1))


if __name__ == '__main__':
    main()
| [
"Ethiopia1!"
] | Ethiopia1! |
ab5a9fce2084bd6d7cef5bc3ab7c1ca5a3c03263 | d14193a5d565c4f8ad9d69a975ae24e62c26943a | /easystack_dashboard/dashboards/admin/volumes/panel.py | dce68793b44c8fbd1409a0af964fe708730b8fea | [
"Apache-2.0"
] | permissive | oksbsb/horizon-acc | b84783c5a81a2678195c45d31a24ca214b69562f | 9524f1952461c83db485d5d1702c350b158d7ce0 | refs/heads/master | 2020-03-19T03:20:52.007840 | 2017-11-14T09:17:04 | 2017-11-14T09:17:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from django.utils.translation import ugettext_lazy as _ # noqa
import horizon
from easystack_dashboard.dashboards.admin import dashboard
class Volumes(horizon.Panel):
    """Admin dashboard panel for block-storage volumes."""

    name = _("Volumes")
    slug = 'volumes'
    # Panel is shown only when the volume service is available.
    permissions = ('openstack.services.volume',)


dashboard.EasyStack_Admin.register(Volumes)
| [
"zhoub1986@aliyun.com"
] | zhoub1986@aliyun.com |
daf6eb50c0ad9b90364b4f70c429ab710dba025b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/1661.py | bf7150e7305f8acd5fc0af408e39ef0d48d99b88 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | import numpy as np
with open('in.txt') as f:
lines = f.readlines()
lines = [l.split('\n')[0] for l in lines]
t = int(lines[0])
def count_speed(d, horses):
    # Python 2 code. Compute the maximum constant speed at which a trailing
    # horse can cover distance d, given (position, speed) pairs where faster
    # horses slow down to the pace of the horse ahead when they catch up.
    # NOTE(review): integer `/` below is floor division under Python 2 —
    # intentional for this contest solution; do not port naively.
    d = float(d)
    if len(horses) == 1:
        # Single horse: travel time is simply remaining distance over speed.
        horse = horses[0]
        t = (d - horse[0]) / horse[1]
        return d / t
    else:
        # Process horses in order of starting position.
        horses = sorted(horses, key=lambda x: x[0])
        print horses
        t = 0
        while len(horses) > 1:
            h1 = horses[0]
            h2 = horses[1]
            if h2[1] == h1[1]:
                # Equal speeds never meet; sentinel pushes x out of range.
                x = -1000000000000
            else:
                # Meeting point of the two leading horses.
                x = (h1[0] * h2[1] - h2[0] * h1[1]) / (h2[1] - h1[1])
            print x
            print h1[0]
            print h2[0]
            if x < min(h1[0], h2[0]) or x > d:
                print 'horses do not meet'
            else:
                # Merge: continue from x at the slower of the two speeds.
                horses[0] = (x, min(h1[1], h2[1]))
                t += (x - h1[0]) / h1[1]
            # Drop the absorbed/irrelevant second horse.
            # TODO(review): indentation reconstructed — confirm `del` sits at
            # loop level (outside the if/else) in the original source.
            del horses[1]
            print 'time', t
            print 'horses left', horses
        # Ride the surviving horse the rest of the way.
        horse = horses[0]
        t_last = (d - horse[0]) / horse[1]
        print 'last horse', horse
        print 'time', t_last
        return d / (t + t_last)
# Parse each test case from the pre-read `lines` and write results.
f = open('out.txt', 'w')
line_count = 1  # line 0 holds the case count; cases start at line 1
i = 1           # 1-based case number for the output format
while True:
    try:
        d, n = lines[line_count].split(' ')
        line_count += 1
        d = int(d)
        n = int(n)
        print d, n
        horses = []
        # Read n (position, speed) pairs for this case.
        for j in xrange(line_count, line_count + n):
            ki, si = lines[j].split(' ')
            line_count += 1
            ki = int(ki)
            si = int(si)
            print ki, si
            horses.append((ki, si))
        speed = count_speed(d, horses)
        print('Case #%s: %f \n' % (i, speed))
        f.write('Case #%s: %f \n' % (i, speed))
        i += 1
        print '-----------------'
    except:
        # NOTE(review): bare except doubles as the end-of-input terminator
        # but also swallows real parsing errors.
        break
f.close()
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
85e55310a98606e59016c33c42ad15b467158b08 | 5c0a253bf2fb83db01abc99097871c965f4cf565 | /spark/crm/PROC_O_LNA_XDXT_IND_EDUCATION.py | 1bea83ed976210cbffd85a9d0c063cf7a6807107 | [] | no_license | airuibel/python-1 | 3b16553ede9d069ec56efbb12a89a4de6917a447 | 94f387e2d406fab2128bcfffce6146da720b2ccc | refs/heads/master | 2020-07-05T15:43:00.957221 | 2017-09-17T14:05:48 | 2017-09-17T14:05:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,241 | py | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
# Spark job bootstrap: app name is the ETL procedure, master from argv.
st = datetime.now()
conf = SparkConf().setAppName('PROC_O_LNA_XDXT_IND_EDUCATION').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
# Choose the SQL context from the optional 5th argument.
# NOTE(review): indentation reconstructed — as written, sqlContext is left
# undefined when fewer than 6 args are passed; confirm against the original.
if len(sys.argv) > 5:
    if sys.argv[5] == "hive":
        sqlContext = HiveContext(sc)
    else:
        sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# Dates needed for processing
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# Previous day
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# First day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# Last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character (dashed) date
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0

# Load today's source extract and expose it to Spark SQL.
O_CI_XDXT_IND_EDUCATION = sqlContext.read.parquet(hdfs+'/O_CI_XDXT_IND_EDUCATION/*')
O_CI_XDXT_IND_EDUCATION.registerTempTable("O_CI_XDXT_IND_EDUCATION")

# Task [12] 001-01::
V_STEP = V_STEP + 1
#先删除原表所有数据
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_IND_EDUCATION/*.parquet")
#从昨天备表复制一份全量过来
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_XDXT_IND_EDUCATION_BK/"+V_DT_LD+".parquet /"+dbname+"/F_CI_XDXT_IND_EDUCATION/"+V_DT+".parquet")
F_CI_XDXT_IND_EDUCATION = sqlContext.read.parquet(hdfs+'/F_CI_XDXT_IND_EDUCATION/*')
F_CI_XDXT_IND_EDUCATION.registerTempTable("F_CI_XDXT_IND_EDUCATION")
sql = """
SELECT A.CUSTOMERID AS CUSTOMERID
,A.SERIALNO AS SERIALNO
,A.BEGINDATE AS BEGINDATE
,A.ENDDATE AS ENDDATE
,A.SCHOOL AS SCHOOL
,A.DEPARTMENT AS DEPARTMENT
,A.SPECIALTY AS SPECIALTY
,A.DEGREE AS DEGREE
,A.EDUEXPERIENCE AS EDUEXPERIENCE
,A.SCHOOLLENGTH AS SCHOOLLENGTH
,A.DIPLOMANO AS DIPLOMANO
,A.DEGREENO AS DEGREENO
,A.INPUTORGID AS INPUTORGID
,A.INPUTUSERID AS INPUTUSERID
,A.INPUTDATE AS INPUTDATE
,A.REMARK AS REMARK
,A.FR_ID AS FR_ID
,V_DT AS ODS_ST_DATE
,'LNA' AS ODS_SYS_ID
FROM O_CI_XDXT_IND_EDUCATION A --个人学业履历
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_IND_EDUCATION_INNTMP1 = sqlContext.sql(sql)
F_CI_XDXT_IND_EDUCATION_INNTMP1.registerTempTable("F_CI_XDXT_IND_EDUCATION_INNTMP1")
#F_CI_XDXT_IND_EDUCATION = sqlContext.read.parquet(hdfs+'/F_CI_XDXT_IND_EDUCATION/*')
#F_CI_XDXT_IND_EDUCATION.registerTempTable("F_CI_XDXT_IND_EDUCATION")
sql = """
SELECT DST.CUSTOMERID --客户编号:src.CUSTOMERID
,DST.SERIALNO --流水号:src.SERIALNO
,DST.BEGINDATE --开始日期:src.BEGINDATE
,DST.ENDDATE --结束日期:src.ENDDATE
,DST.SCHOOL --所在学校:src.SCHOOL
,DST.DEPARTMENT --所在院系:src.DEPARTMENT
,DST.SPECIALTY --专业:src.SPECIALTY
,DST.DEGREE --最高学位:src.DEGREE
,DST.EDUEXPERIENCE --最高学历:src.EDUEXPERIENCE
,DST.SCHOOLLENGTH --学制:src.SCHOOLLENGTH
,DST.DIPLOMANO --学历证书号:src.DIPLOMANO
,DST.DEGREENO --学位证书号:src.DEGREENO
,DST.INPUTORGID --登记单位:src.INPUTORGID
,DST.INPUTUSERID --登记人:src.INPUTUSERID
,DST.INPUTDATE --登记日期:src.INPUTDATE
,DST.REMARK --备注:src.REMARK
,DST.FR_ID --法人号:src.FR_ID
,DST.ODS_ST_DATE --系统平台日期:src.ODS_ST_DATE
,DST.ODS_SYS_ID --系统代码:src.ODS_SYS_ID
FROM F_CI_XDXT_IND_EDUCATION DST
LEFT JOIN F_CI_XDXT_IND_EDUCATION_INNTMP1 SRC
ON SRC.CUSTOMERID = DST.CUSTOMERID
AND SRC.SERIALNO = DST.SERIALNO
WHERE SRC.CUSTOMERID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_IND_EDUCATION_INNTMP2 = sqlContext.sql(sql)
dfn="F_CI_XDXT_IND_EDUCATION/"+V_DT+".parquet"
F_CI_XDXT_IND_EDUCATION_INNTMP2=F_CI_XDXT_IND_EDUCATION_INNTMP2.unionAll(F_CI_XDXT_IND_EDUCATION_INNTMP1)
F_CI_XDXT_IND_EDUCATION_INNTMP1.cache()
F_CI_XDXT_IND_EDUCATION_INNTMP2.cache()
nrowsi = F_CI_XDXT_IND_EDUCATION_INNTMP1.count()
nrowsa = F_CI_XDXT_IND_EDUCATION_INNTMP2.count()
F_CI_XDXT_IND_EDUCATION_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
F_CI_XDXT_IND_EDUCATION_INNTMP1.unpersist()
F_CI_XDXT_IND_EDUCATION_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert F_CI_XDXT_IND_EDUCATION lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
ret = os.system("hdfs dfs -mv /"+dbname+"/F_CI_XDXT_IND_EDUCATION/"+V_DT_LD+".parquet /"+dbname+"/F_CI_XDXT_IND_EDUCATION_BK/")
#先删除备表当天数据
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_IND_EDUCATION_BK/"+V_DT+".parquet")
#从当天原表复制一份全量到备表
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_XDXT_IND_EDUCATION/"+V_DT+".parquet /"+dbname+"/F_CI_XDXT_IND_EDUCATION_BK/"+V_DT+".parquet")
| [
"cysuncn@126.com"
] | cysuncn@126.com |
b8e1f6454943395c9f5d4751831e30f28927d1b3 | 43e5657beca9836215e43f16da8f274e613ccb18 | /experiment_impact_tracker/emissions/common.py | d833313e4c68463176009a6397e92725ec32b41b | [
"MIT"
] | permissive | ml-lab/experiment-impact-tracker | eaa99b8ef7efec8c624ccc84e19602b7df8e8241 | 7017ed2c88526c2323603b254f8b81710db23ffa | refs/heads/master | 2022-04-01T20:03:19.736764 | 2020-02-14T02:16:16 | 2020-02-14T02:16:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | import experiment_impact_tracker.emissions.us_ca_parser as us_ca_parser
import numpy
# Registry of regions with a realtime carbon-intensity data source.
# Maps a region code to the parser module that knows how to fetch its feed.
REALTIME_REGIONS = {
    "US-CA": us_ca_parser  # California ISO realtime feed
}
def is_capable_realtime_carbon_intensity(*args, region=None, **kwargs):
    """Return True if *region* has a registered realtime carbon-intensity parser.

    Extra positional/keyword arguments are accepted (and ignored) so the
    function can be invoked through a generic dispatch interface.
    """
    # Membership test on the dict directly; the original wrapped the keys in
    # list(REALTIME_REGIONS.keys()), an unnecessary O(n) copy per call.
    return region in REALTIME_REGIONS
def get_realtime_carbon_source(region):
    """Return the human-readable data-source description for *region*'s feed."""
    parser = REALTIME_REGIONS[region]
    return parser.get_realtime_carbon_source()
def get_realtime_carbon(*args, **kwargs):
    """Fetch the current grid carbon intensity for the region in ``kwargs``.

    Returns a dict with key ``"realtime_carbon_intensity"``; the value is the
    numeric intensity on success and the string ``"n/a"`` when the feed is
    unavailable or reports NaN.

    Raises
    ------
    ValueError
        If no ``region`` keyword argument was supplied.
    """
    if 'region' not in kwargs:
        raise ValueError("region was not passed to function")
    try:
        carbon_intensity = REALTIME_REGIONS[kwargs['region']].fetch_supply()[0]['carbon_intensity']
        if numpy.isnan(carbon_intensity):
            return {
                "realtime_carbon_intensity": "n/a"
            }
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any feed or parse failure still degrades
        # gracefully to "n/a".
        return {
            "realtime_carbon_intensity": "n/a"
        }
    return {
        "realtime_carbon_intensity": carbon_intensity
    }
| [
"peter.henderson@mail.mcgill.ca"
] | peter.henderson@mail.mcgill.ca |
9d3754884cbbf3bb4fc1923201fceb1d8b22683c | adaa06e70db86a395c76fe9945c04381321ac127 | /neurokernel/routing_table.py | 6bf5a28e9cd5656d5632a3ad5576e1b51544e456 | [
"BSD-3-Clause"
] | permissive | hihihippp/neurokernel | d9a45dbc26ec1b307c8be696469a0e20db809fe2 | f25252a2a2a4443f16ad25b7c5b5eff943f36440 | refs/heads/master | 2020-12-25T23:47:10.899818 | 2014-03-30T18:07:58 | 2014-03-30T18:07:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,485 | py | #!/usr/bin/env python
"""
Routing table class.
"""
import numpy as np
import la
class RoutingTable(object):
    """
    Routing table.

    Parameters
    ----------
    t : la.larry
        Labeled array to use when initializing table.

    Notes
    -----
    Inserting rows or columns of values is not currently supported.
    The initial array must be 2D and possess the same list labels for
    both of its dimensions.
    """

    def __init__(self, t=None):
        # An empty table is represented by _data == None until the first
        # __setitem__ call creates the underlying labeled array.
        if t is None:
            self._data = None
        else:
            try:
                # NOTE(review): these two comparisons only verify that the
                # attribute accesses do not raise; their boolean results are
                # discarded, so a larry with mismatched row/column labels is
                # NOT actually rejected here -- confirm whether intended.
                type(t) == la.larry
                t.label[0] == t.label
            except:
                raise ValueError('invalid initial array')
            else:
                self._data = t.copy()

    def __setitem__(self, key, value):
        # Keys must be (row_id, col_id) pairs; slice assignment is rejected.
        if type(key) == slice:
            raise ValueError('assignment by slice not supported')
        if len(key) != 2:
            raise KeyError('invalid key')
        row, col = key
        # NOTE(review): relies on larry truthiness, unlike the explicit
        # ``is None`` checks used elsewhere in this class.
        if not self._data:
            # First entry: create a square zero matrix labeled by the
            # distinct ids appearing in the key.
            label = list(set(key))
            self._data = la.larry(np.zeros(2*(len(label),), type(value)),
                                  [label, label])
        else:
            # If either the row or column identifier isn't in the
            # current list of labels, add it:
            for k in key:
                # Create new row:
                if k not in self._data.label[0]:
                    self._data = self._data.merge(la.larry([[0]*len(self._data.label[1])],
                                                           [[k], self._data.label[1]]))
                # Create new column:
                if k not in self._data.label[1]:
                    self._data = self._data.merge(la.larry([[0]]*len(self._data.label[0]),
                                                           [self._data.label[0], [k]]))
        # Values are coerced to int before storage.
        self._data.set([row, col], int(value))

    def __getitem__(self, key):
        # Index values referring to labels must be wrapped with lists:
        reformat_slice = lambda s: slice(s.start if s.start is None else [s.start],
                                         s.stop if s.stop is None else [s.stop],
                                         s.step)
        if type(key) == tuple:
            key = tuple([reformat_slice(k) if type(k) == slice else [k] for k in key])
        elif type(key) == slice:
            key = reformat_slice(key)
        else:
            key = [key]
        # Label-based ("fancy") indexing into the underlying larry.
        return self._data.lix[key]

    def __copy__(self):
        # RoutingTable.__init__ copies the larry, so the new table does not
        # share storage with this one.
        return RoutingTable(self._data)
    copy = __copy__

    @property
    def shape(self):
        """
        Shape of table.
        """
        return self._data.shape

    @property
    def ids(self):
        """
        IDs currently in routing table.
        """
        if self._data is None:
            return []
        else:
            return self._data.label[0]

    @property
    def coords(self):
        """
        List of coordinate tuples of all nonzero table entries.
        """
        if self._data is None:
            return []
        else:
            # totuples() yields (row_label, col_label, value); keep the label
            # pair for entries whose value is nonzero.
            return [tuple(x[0:2]) for x in self._data.totuples() if x[2]]

    def row_ids(self, col_id):
        """
        Row IDs connected to a column ID.
        """
        return [self[:, col_id].label[0][i] for i, e in \
                enumerate(self[:, col_id]) if e != 0]

    def all_row_ids(self):
        """
        All row IDs connected to column IDs.
        """
        # A row participates in a connection if any entry in it is nonzero.
        # NOTE(review): np.bool was removed in NumPy 1.24; modern NumPy needs
        # plain ``bool`` (or np.bool_) here.
        return [self._data.label[0][i] for i, e in \
                enumerate(np.sum(self._data.x, 1, np.bool)) if e]

    def col_ids(self, row_id):
        """
        Column IDs connected to a row ID.
        """
        return [self[row_id, :].label[0][i] for i, e in \
                enumerate(self[row_id, :]) if e != 0]

    def all_col_ids(self):
        """
        All column IDs connected to row IDs.
        """
        return [self._data.label[0][i] for i, e in \
                enumerate(np.sum(self._data.x, 0, np.bool)) if e]

    def __repr__(self):
        if self._data is None:
            return 'empty'
        else:
            # Show the known ids followed by the raw connectivity matrix.
            t = 'ids: ' + repr(self.ids) + '\n' + \
                self._data.getx().__repr__()
            return t
if __name__ == '__main__':
    from unittest import main, TestCase

    class test_routingtable(TestCase):
        """Smoke tests for RoutingTable built from a small two-edge table."""

        def setUp(self):
            # Table with connections a->b and b->c.
            self.coords_orig = [('a', 'b'), ('b', 'c')]
            self.ids_orig = set([i[0] for i in self.coords_orig]+\
                                [i[1] for i in self.coords_orig])
            self.t = RoutingTable()
            for c in self.coords_orig:
                self.t[c[0], c[1]] = 1

        def test_shape(self):
            # The table is always square over the set of known ids.
            n = len(self.ids_orig)
            assert self.t.shape == (n, n)

        def test_ids(self):
            assert set(self.t.ids) == self.ids_orig

        def test_coords(self):
            assert set(self.t.coords) == set(self.coords_orig)

        def test_all_row_ids(self):
            # Source ids of the two edges.
            assert set(self.t.all_row_ids()) == \
                set([i[0] for i in self.coords_orig])

        def test_all_col_ids(self):
            # Destination ids of the two edges.
            assert set(self.t.all_col_ids()) == \
                set([i[1] for i in self.coords_orig])

        def test_row_ids(self):
            for i in self.coords_orig:
                assert i[0] in self.t.row_ids(i[1])

        def test_col_ids(self):
            for i in self.coords_orig:
                assert i[1] in self.t.col_ids(i[0])

    main()
| [
"lev@columbia.edu"
] | lev@columbia.edu |
c4487150dcd4a86c20e4becad5574f8dd4551904 | a7587f813492163433202e244df2237c9993a1a1 | /Cart/context_processors.py | 06195e7f77f9cfb23960d2a52fa0e5c627c1280f | [] | no_license | kamran1231/E-COM-WEBSITE-2021 | 3a10bc0059f4d29fc52ee029e4919d4f965174c6 | 32214468cf716cc312a63f6346b8c844f720abda | refs/heads/master | 2023-06-01T03:18:03.137405 | 2021-07-04T14:20:16 | 2021-07-04T14:20:16 | 381,634,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | from .models import CartItem,Carts
from .views import _cart_id
def cart_quantity(request):
    """Context processor exposing the total item quantity of the visitor's cart."""
    # Admin pages get no cart context at all.
    if 'admin' in request.path:
        return {}
    total = 0
    try:
        cart = Carts.objects.filter(cart_id=_cart_id(request))
        items = CartItem.objects.all().filter(cart=cart[:1])
        total = sum(item.quantity for item in items)
    except Carts.DoesNotExist:
        total = 0
    return {'cart_count': total}
"khanbrother805@gmail.com"
] | khanbrother805@gmail.com |
44020a2430f1ee4d677ebf2e72f517311b4c61fc | a01fb7bb8e8738a3170083d84bc3fcfd40e7e44f | /python3/test/skip_test.py | a678856ad3b4261a4ba7525d1720c9bb602fa589 | [] | no_license | jk983294/CommonScript | f07acf603611b4691b176aa4a02791ef7d4d9370 | 774bcbbae9c146f37312c771c9e867fb93a0c452 | refs/heads/master | 2023-08-21T17:50:19.036159 | 2023-08-16T00:22:03 | 2023-08-16T00:22:03 | 42,732,160 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | import unittest
import os
import platform
class Tests(unittest.TestCase):
    """Demonstrates the unittest skip decorators."""

    def test_0(self):
        # Always runs.
        self.assertTrue(True)

    @unittest.skip('skipped test')
    def test_1(self):
        # Never executed; would fail the run otherwise.
        self.fail('should have failed!')

    @unittest.skipIf(os.name == 'posix', 'Not supported on Unix')
    def test_2(self):
        # winreg only exists on Windows, hence the skip on POSIX.
        import winreg

    @unittest.skipUnless(platform.system() == 'Darwin', 'Mac specific test')
    def test_3(self):
        self.assertTrue(True)
if __name__ == '__main__':
    # Discover and run the tests above when executed as a script.
    unittest.main()
| [
"jk983294@gmail.com"
] | jk983294@gmail.com |
37d6d2d4799abb3252b0037faeff26b63b15947e | 77900cdd9a815caf1cd04705321ca93f5072179f | /Project/.history/product_20211026233548.py | 4c3e77961db96c2cb4baab24e8753d3719e59621 | [] | no_license | Bom19990111/helloword_python | 717799d994223d65de5adaeabecf396ff2bc1fb7 | 2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7 | refs/heads/master | 2023-09-06T04:17:02.057628 | 2021-11-21T20:00:46 | 2021-11-21T20:00:46 | 407,063,273 | 0 | 1 | null | 2021-11-21T20:00:47 | 2021-09-16T07:18:35 | Python | UTF-8 | Python | false | false | 6,893 | py | import data as list_product
import random
# def __init__(self, Id, Product_code, Product_name, Brand, Year, Size):
# self.Id = Id
# self.Product_code = Product_code
# self.Product_name = Product_name
# self.Brand = Brand
# self.Year = Year
# self.Size = Size
# Thêm sản phẩm
def AddProduct():
    """Prompt the user for a new product's details and append it to the list.

    Repeats recursively if the user answers Y to the continue prompt.
    (All user-facing prompts are intentionally kept in Vietnamese.)
    """
    print("THÊM SẢN PHẨM")
    product = {
        "Id": "",
        "Product_code": "",
        "Product_name": "",
        "Brand": "",
        "Price": "",
        "Year": "",
        "Quantity": "",
        "Size": ""
    }
    print("Nhập ID sản phẩm:")
    Id = int(input())
    # Re-prompt until the entered ID is not already in use.
    while True:
        student = FindProductDuplicate(Id)
        if student != False:
            print("ID đã tồn tại, vui lòng nhập lại ID:")
            Id = int(input())
        else:
            break
    product['Id'] = Id
    # Random product code, zero-padded to two digits: HKSP01 .. HKSP99.
    code_product = random.randint(1, 99)
    str_id = "HKSP"
    if code_product <= 9:
        str_id += "0" + str(code_product)
    else:
        str_id += str(code_product)
    product["Product_code"] = str_id
    print("Nhập tên sản phẩm: ")
    product['Product_name'] = input()
    print("Nhập thương hiệu sản phẩm: ")
    product['Brand'] = input()
    print("Nhập giá sản phẩm: ")
    product['Price'] = float(input())
    print("Nhập năm sản xuất: ")
    product['Year'] = int(input())
    print("Nhập số lượng: ")
    product['Quantity'] = int(input())
    print("Nhập size giày: ")
    product['Size'] = input()
    list_product.list_product.append(product)
    # Recursively restart the whole prompt if the user wants to add more.
    answer = input("Bạn có muốn nhập tiếp không? Y/N ")
    if answer == "y" or answer == "Y":
        AddProduct()
# Tìm kiếm ID trùng lặp
def FindProductDuplicate(Id):
    """Return [index, product] for the product whose 'Id' equals *Id*, else False."""
    for position, item in enumerate(list_product.list_product):
        if item['Id'] == Id:
            return [position, item]
    return False
# Hiển thị tất cả sản phẩm
def ShowAllProduct():
    """Print every product currently stored in the product list."""
    print("*** HIỂN THỊ TẤT CẢ SẢN PHẨM ***")
    # The original tested ``len(...) == 0 or len(...) < 0``; a length can
    # never be negative, so a plain emptiness check is sufficient.
    if not list_product.list_product:
        print("Chưa có sản phẩm nào để hiển thị! ".upper())
    for item in list_product.list_product:
        # Stray trailing commas after the print() calls (which built useless
        # one-element tuples) have been removed.
        print("ID: ", item['Id'])
        print("Mã sản phẩm: ", item['Product_code'])
        print("Tên sản phẩm: ", item['Product_name'])
        print("Thương hiệu: ", item['Brand'])
        print("Giá: ", item['Price'])
        print("Năm xuất bản: ", item['Year'])
        print("Số lượng: ", item['Quantity'])
        print("Size giày: ", item['Size'])
        print("________________________________")
# Sửa thông tin sản phẩm
def UpdateProduct():
    """Interactively update one field of a product chosen by ID.

    Shows a numbered menu in a loop; 0 exits.  The nested updater functions
    are closures over ``product`` (the [index, dict] pair returned by
    FindProductDuplicate) and mutate the stored dict in place.
    """
    print("*** CẬP NHẬT THÔNG TIN SẢN PHẨM ***")
    print("Nhập ID sản phẩm cần sửa")
    Id = int(input())
    product = FindProductDuplicate(Id)
    if product == False:
        print("Không tìm thấy sản phẩm ID = ", Id)
    else:
        # NOTE(review): in-string indentation of this menu was lost in the
        # source copy; the literal below preserves the visible text.
        print("""Bạn muốn cập nhật mục nào ? :
        0. Thoát.
        1. Tên sản phẩm.
        2. Thương hiệu sản phẩm.
        3. Giá sản phẩm
        4. Size giày.
        5. Số lượng.
        6. Năm xuất bản. """)
        # First pass: action == 0 skips the dispatch below, defines the
        # updater closures, then reads the user's first choice.
        action = 0
        while action >= 0:
            if action == 1:
                UpdateProductName()
            elif action == 2:
                UpdateProductBrand()
            elif action == 3:
                UpdateProductPrice()
            elif action == 4:
                UpdateProductSize()
            elif action == 5:
                UpdateProductQuatity()
            elif action == 6:
                UpdateProductYear()

            def UpdateProductName():
                print("Nhập tên sản phẩm")
                name_product = input()
                product[1]['Product_name'] = name_product

            def UpdateProductBrand():
                print("Nhập thương hiệu của sản phẩm")
                name_product = input()
                product[1]['Brand'] = name_product

            def UpdateProductPrice():
                print("Nhập giá mới của sản phẩm")
                name_product = float(input())
                product[1]['Price'] = name_product

            def UpdateProductSize():
                print("Nhập size của sản phẩm")
                name_product = input()
                product[1]['Size'] = name_product

            def UpdateProductYear():
                print("Nhập năm sản xuất của sản phẩm")
                name_product = int(input())
                product[1]['Year'] = name_product
                # NOTE(review): this write-back is redundant -- product[1]
                # already references the stored dict; only Year/Quantity
                # do it, the other updaters do not.
                list_product.list_product[product[0]] = product[1]

            def UpdateProductQuatity():
                print("Nhập số lượng sản phẩm")
                name_product = int(input())
                product[1]['Quantity'] = name_product
                list_product.list_product[product[0]] = product[1]

            action = int(input("Bạn chọn mục cập nhật nào? "))
            if action == 0:
                print("Không cập nhật mục nào")
                break
# Xóa sản phẩm
def DeleteProduct():
    """Remove the product with a user-supplied ID from the product list."""
    print("*** XÓA SẢN PHẨM ***")
    print("Nhập ID sản phẩm cần xóa:")
    Id = int(input())
    found = FindProductDuplicate(Id)
    if found == False:
        print("Không tìm thấy sản phẩm muốn xóa!")
    else:
        list_product.list_product.remove(found[1])
        print("Xóa sản phẩm thành công!")
# Tìm kiếm sản phẩm
def FindProductByName():
    """Search products by name or brand (case-insensitive substring) and print matches."""
    print("*** TÌM KIẾM SẢN PHẨM ***")
    new_list = []
    NameProduct = str(
        input("Nhập tên sản phẩm hoặc tên thương hiệu bạn muốn tìm kiếm: ")).upper()
    for item in list_product.list_product:
        # Bug fix: the original tested ``product_name in query`` (reversed
        # containment); a search term should match when it occurs INSIDE the
        # product name or brand.
        if NameProduct in str(item['Product_name']).upper() or NameProduct in str(item['Brand']).upper():
            # Bug fix: store the matching product dict, not its index -- the
            # original appended ``i`` and then crashed indexing ints as dicts
            # (with hard-coded positions 1..6, no less).
            new_list.append(item)
    if not new_list:
        # Bug fix: the original used ``for ... else`` here, which printed the
        # "not found" message after EVERY search since the loop never breaks.
        print("Không tìm thấy sản phẩm này @@".upper())
        return
    for item in new_list:
        print("ID: ", item['Id'])
        print("Mã sản phẩm: ", item['Product_code'])
        print("Tên sản phẩm: ", item['Product_name'])
        print("Thương hiệu: ", item['Brand'])
        print("Giá: ", item['Price'])
        print("Năm xuất bản: ", item['Year'])
        print("Số lượng: ", item['Quantity'])
        print("Size giày: ", item['Size'])
        print("________________________________")
| [
"phanthituyngoc1995@gmail.com"
] | phanthituyngoc1995@gmail.com |
b4ab245bcbf76e57839fcc1722d1ddd565a89c78 | fe82835f39ec48daa8e9d425f66ededac0347d2a | /python全栈开发/网络爬虫/myscrapy/movie/movie/spiders/douban.py | c52070006e5218aad783dc97f510a089c396ec4e | [] | no_license | Abel-Fan/UAIF1907 | 3a43d7c93b71f64d76b4b7ea2e668a46c8fa10fa | dc53af8cbf3d15f356d52c032c62251fd2536222 | refs/heads/master | 2023-01-10T16:32:07.760465 | 2019-10-29T02:55:01 | 2019-10-29T02:55:01 | 199,558,420 | 2 | 3 | null | 2023-01-04T22:47:49 | 2019-07-30T02:20:57 | Python | UTF-8 | Python | false | false | 738 | py | # -*- coding: utf-8 -*-
import scrapy
class DoubanSpider(scrapy.Spider):
    """Crawls the Douban Top-250 movie list and visits each entry's detail page."""
    name = 'douban'
    allowed_domains = ['movie.douban.com']
    # Offset of the current list page; Douban paginates 25 movies per page.
    index = 0
    url = 'https://movie.douban.com/top250?start='
    start_urls = ['https://movie.douban.com/top250?start=0']

    def parse(self, response):
        # Queue each movie's detail page from the current list page.
        for sel in response.xpath("//div[@class='hd']"):
            url = sel.xpath("a/@href").extract()[0]
            yield scrapy.Request(url, callback=self.parseCon)
        # Paginate through offsets 25, 50, ... 225 (10 pages total).
        self.index += 25
        if self.index < 226:
            yield scrapy.Request(self.url + str(self.index), callback=self.parse)

    def parseCon(self, response):
        # Detail page: extract and print the movie title.
        title = response.xpath("//h1/span[1]/text()").extract()[0]
        print(title)
| [
"842615663@qq.com"
] | 842615663@qq.com |
25189a323700e225e6f96c440e680102061762f0 | 509823ea14f04d5791486b56a592d7e7499d7d51 | /parte05/ex5.09_suma_valores_diccionario.py | 618b2f2b3e475e6ab8a3018557ce2d1f48b3d8d1 | [] | no_license | Fhernd/Python-CursoV2 | 7613144cbed0410501b68bedd289a4d7fbefe291 | 1ce30162d4335945227f7cbb875f99bc5f682b98 | refs/heads/master | 2023-08-08T05:09:44.167755 | 2023-08-05T19:59:38 | 2023-08-05T19:59:38 | 239,033,656 | 64 | 38 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | # Ejercicio 5.9: Sumar todos los valores de un diccionario.
productos = {'Mouse': 29.9, 'Teclado': 119.9, 'Audífonos': 35.9, 'Monitor': 299}
total = sum(productos.values())
print('El total de los productos es:', total)
| [
"johnortizo@outlook.com"
] | johnortizo@outlook.com |
b0fa16737a686df72fd98cff9f7fd566d0d5e80a | 2ed0ab730b62665b3a36841ab006eea961116f87 | /Hash/MaximumSizeSubArray.py | 4c44f5c8ae5645363b1ad573c522e8e3413e73b1 | [] | no_license | scarlettlite/hackathon | 0f0a345d867b9e52823f10fe67c6ec210a40945f | 179ba9038bbed4d48cb2f044fd8430cf2be2bab3 | refs/heads/master | 2021-07-04T00:55:17.665292 | 2019-03-04T09:10:59 | 2019-03-04T09:10:59 | 141,269,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | class Solution:
def maxSubArrayLen(self, nums, k):
index, l, sm = {}, 0, 0
"""
if some subarray adds up to zero,
then include it
"""
index[0] = -1
for i, num in enumerate(nums):
sm += num
"""
sm - (sm - k) = k
"""
if sm - k in index:
l = max(l, i - index[sm - k])
if sm not in index:
"""
for each sum note the earliest index at which the sum occurs
"""
index[sm] = i
return l
print(Solution().maxSubArrayLen([-2, -1, 2, 1], 1)) | [
"shivanirathore496@gmail.com"
] | shivanirathore496@gmail.com |
c7bdf27bb365b6e35ca7896779eabc935dc9456e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02410/s129903146.py | f4bc923b5e62b74d7db52836dc67ef4179c3d122 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | n,m = map(int,input().split())
mat = list()
vec = list()
ans = [0 for _ in range(n)]
for _ in range(n):
mat.append(list(map(int,input().split())))
for _ in range(m):
vec.append(int(input()))
for i in range(n):
for j in range(m):
ans[i] += mat[i][j]*vec[j]
for k in ans :
print(k) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5c512f565ac92a4de97031a9ab515142b7a4c682 | 9a066d64eb81bd65fb51790579b7e9ea874beb3b | /seisma/conf/default.py | 05ffc3b4464d0bd1557433dc5584a97bcaacda7b | [] | no_license | crisscuola/seisma-server | 377e56ec6d636b056f95c6b425cbfebd62f8ec3e | 1ea66ac986e4e3bd6911d572e278daf7ff728c75 | refs/heads/master | 2021-06-12T08:26:16.442942 | 2017-03-17T20:14:16 | 2017-03-17T20:14:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | py | # -*- coding: utf-8 -*-
import sys
# Application settings
DEBUG = False
TESTING = False

# Database settings (MySQL connection + SQLAlchemy pool tuning)
DATABASE = {
    'HOST': '127.0.0.1',
    'PORT': 3306,
    'USER': 'root',
    'PASSWORD': '',
    'NAME': 'seisma',
    'POOL_SIZE': 10,
    'POOL_TIMEOUT': 10,
    'POOL_RECYCLE': 60 * 5,  # recycle pooled connections every 5 minutes
    'MAX_OVERFLOW': -1,  # -1: unlimited overflow connections
    'SQL_LOG': False,
    'TRACK_MODIFICATIONS': False,
}

# Cache (Redis connection settings)
REDIS_CACHE = {
    'HOST': '127.0.0.1',
    'PORT': 6379,
    'DB': 0,
    'IS_DISABLED': False,
    'MAX_CONNECTIONS': 15,
    'GET_CONNECTION_TIMEOUT': 10,
}

# Logging settings (dictConfig schema, version 1)
LOGGING_SETTINGS = {
    'version': 1,
    'formatters': {
        'basic': {
            'format': '%(asctime)-15s %(levelname)s %(message)s'
        },
    },
    'handlers': {
        # Everything goes to stderr; 'null' exists for silencing loggers.
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'stream': sys.stderr,
            'formatter': 'basic'
        },
        'null': {
            'class': 'logging.NullHandler',
            'level': 'DEBUG'
        },
    },
    'loggers': {
        # App logger at INFO; framework/ORM loggers kept quieter (WARNING).
        'seisma': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False,
        },
        'flask': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False,
        },
        'flask_sqlalchemy': {
            'handlers': ['console'],
            'level': 'WARNING',
            'propagate': False,
        },
        'flask_migrate': {
            'handlers': ['console'],
            'level': 'WARNING',
            'propagate': False,
        },
        'flask_script': {
            'handlers': ['console'],
            'level': 'WARNING',
            'propagate': False,
        },
        'sqlalchemy': {
            'handlers': ['console'],
            'level': 'WARNING',
            'propagate': False,
        },
    },
    'root': {
        'propagate': False,
        'handlers': ['console'],
        'level': 'INFO',
    },
}

# clear results over days
ROTATE_FOR_DAYS = 365

# max time of build in minutes
MAX_BUILD_TIMEOUT = 60
| [
"mikhail.trifonov@corp.mail.ru"
] | mikhail.trifonov@corp.mail.ru |
aa5bd5a0f7c81ee9514591e0bfa78361be291fc8 | e75a40843a8738b84bd529a549c45776d09e70d9 | /samples/openapi3/client/petstore/python-aiohttp/petstore_api/api/another_fake_api.py | 3ba5d212b863746d51bea35a12aa8f8ac687322d | [
"Apache-2.0"
] | permissive | OpenAPITools/openapi-generator | 3478dbf8e8319977269e2e84e0bf9960233146e3 | 8c2de11ac2f268836ac9bf0906b8bb6b4013c92d | refs/heads/master | 2023-09-02T11:26:28.189499 | 2023-09-02T02:21:04 | 2023-09-02T02:21:04 | 133,134,007 | 17,729 | 6,577 | Apache-2.0 | 2023-09-14T19:45:32 | 2018-05-12T09:57:56 | Java | UTF-8 | Python | false | false | 8,023 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\
The version of the OpenAPI document: 1.0.0
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
""" # noqa: E501
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from typing import overload, Optional, Union, Awaitable
from pydantic import Field
from petstore_api.models.client import Client
from petstore_api.api_client import ApiClient
from petstore_api.api_response import ApiResponse
from petstore_api.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class AnotherFakeApi:
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None) -> None:
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@overload
async def call_123_test_special_tags(self, client : Annotated[Client, Field(..., description="client model")], **kwargs) -> Client: # noqa: E501
...
@overload
def call_123_test_special_tags(self, client : Annotated[Client, Field(..., description="client model")], async_req: Optional[bool]=True, **kwargs) -> Client: # noqa: E501
...
@validate_arguments
def call_123_test_special_tags(self, client : Annotated[Client, Field(..., description="client model")], async_req: Optional[bool]=None, **kwargs) -> Union[Client, Awaitable[Client]]: # noqa: E501
"""To test special tags # noqa: E501
To test special tags and operation ID starting with number # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.call_123_test_special_tags(client, async_req=True)
>>> result = thread.get()
:param client: client model (required)
:type client: Client
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request.
If one number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Client
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
message = "Error! Please call the call_123_test_special_tags_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data" # noqa: E501
raise ValueError(message)
if async_req is not None:
kwargs['async_req'] = async_req
return self.call_123_test_special_tags_with_http_info(client, **kwargs) # noqa: E501
@validate_arguments
def call_123_test_special_tags_with_http_info(self, client : Annotated[Client, Field(..., description="client model")], **kwargs) -> ApiResponse: # noqa: E501
"""To test special tags # noqa: E501
To test special tags and operation ID starting with number # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.call_123_test_special_tags_with_http_info(client, async_req=True)
>>> result = thread.get()
:param client: client model (required)
:type client: Client
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the ApiResponse.data will
be set to none and raw_data will store the
HTTP response body without reading/decoding.
Default is True.
:type _preload_content: bool, optional
:param _return_http_data_only: response data instead of ApiResponse
object with status code, headers, etc
:type _return_http_data_only: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(Client, status_code(int), headers(HTTPHeaderDict))
"""
_params = locals()
_all_params = [
'client'
]
_all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
# validate the arguments
for _key, _val in _params['kwargs'].items():
if _key not in _all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method call_123_test_special_tags" % _key
)
_params[_key] = _val
del _params['kwargs']
_collection_formats = {}
# process the path parameters
_path_params = {}
# process the query parameters
_query_params = []
# process the header parameters
_header_params = dict(_params.get('_headers', {}))
# process the form parameters
_form_params = []
_files = {}
# process the body parameter
_body_params = None
if _params['client'] is not None:
_body_params = _params['client']
# set the HTTP header `Accept`
_header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# set the HTTP header `Content-Type`
_content_types_list = _params.get('_content_type',
self.api_client.select_header_content_type(
['application/json']))
if _content_types_list:
_header_params['Content-Type'] = _content_types_list
# authentication setting
_auth_settings = [] # noqa: E501
_response_types_map = {
'200': "Client",
}
return self.api_client.call_api(
'/another-fake/dummy', 'PATCH',
_path_params,
_query_params,
_header_params,
body=_body_params,
post_params=_form_params,
files=_files,
response_types_map=_response_types_map,
auth_settings=_auth_settings,
async_req=_params.get('async_req'),
_return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
_preload_content=_params.get('_preload_content', True),
_request_timeout=_params.get('_request_timeout'),
collection_formats=_collection_formats,
_request_auth=_params.get('_request_auth'))
| [
"noreply@github.com"
] | OpenAPITools.noreply@github.com |
d6cb7279f5266fe1ccfbfb7eab62fa9820400359 | 4af394289f00e654b5f1611d3acc11956ff40250 | /doc/samples/qdiipred.py | 212ca14acc9f14e692e177602bbf7c9ef6e67f1a | [
"MIT"
] | permissive | vensentzhou/xalpha | 159b6d4d325878f830abae1fcd8c8e27bbd41b4f | 03537dc009c4c15416bfe385a07c7068950d1152 | refs/heads/master | 2023-03-05T00:11:50.186495 | 2021-02-14T05:32:21 | 2021-02-14T05:32:21 | 319,993,901 | 0 | 0 | MIT | 2021-02-14T05:32:22 | 2020-12-09T15:11:13 | null | UTF-8 | Python | false | false | 3,672 | py | """
一个简单展示 qdii 实时净值预测的例子,最大限度的利用缓存而减少网络请求
"""
import pandas as pd
import xalpha as xa
import logging
# Use the csv cache backend with data precached up to 2020-01-03
# (minimizes network requests -- the stated goal of this sample).
xa.set_backend(backend="csv", path="../../../lof/data", precached="20200103")

# Route xalpha's debug logging to stderr so cache hits/misses are visible.
logger = logging.getLogger("xalpha")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
@xa.universal.lru_cache_time(ttl=180)
def cached_get_rt(code, **kws):
    """Realtime quote fetch, memoized for 180 s to limit network requests."""
    return xa.get_rt(code, handler=False)
@xa.universal.lru_cache_time(ttl=1800)
def cached_get_bar(code, *args, **kws):
    """Bar-data fetch, memoized for 30 min; only handles commodities codes."""
    if code.startswith("commodities/"):
        kws["handler"] = False
        return xa.get_bar(code, *args, **kws)
    # Non-commodity codes return None -- presumably signalling xalpha to fall
    # back to its default fetch path (TODO confirm against xa.set_handler docs).
    return None
# Install the cached fetchers as xalpha's realtime and bar data handlers.
xa.set_handler(method="rt", f=cached_get_rt)
xa.set_handler(method="bar", f=cached_get_bar)
qdiis = [
"SH501018",
"SZ160416",
"SZ161129",
"SZ160723",
"SZ160216",
"SZ162411",
"SZ163208",
"SZ162719",
"SZ165513",
"SZ161815", # fr
"SZ161116", # lu
"SZ164701",
"SZ160719",
"SZ164824",
"SH513030",
"SZ160140",
"SZ165510",
"SZ164906",
"SH513050",
]
nonqdiis = [
"SH501021",
"SH513880",
"SH513520",
"SH513000",
"SH510510",
"SZ159922",
"SH510500",
"SH512500",
"SZ159920",
]
data = {
"code": [],
"name": [],
"t1": [],
"t0": [],
"now": [],
"t1rate": [],
"t0rate": [],
"position": [],
}
for c in qdiis:
p = xa.QDIIPredict(c, fetch=True, save=True, positions=True)
try:
data["t1"].append(round(p.get_t1(return_date=False), 4))
data["t1rate"].append(round(p.get_t1_rate(return_date=False), 2))
try:
data["t0"].append(round(p.get_t0(return_date=False), 4))
data["t0rate"].append(round(p.get_t0_rate(return_date=False), 2))
except ValueError:
data["t0"].append("-")
data["t0rate"].append("-")
data["position"].append(round(p.get_position(return_date=False), 3))
data["now"].append(xa.get_rt(c)["current"])
data["code"].append(c)
data["name"].append(xa.get_rt(c)["name"])
except xa.exceptions.NonAccurate as e:
print("%s cannot be predicted exactly now" % c)
print(e.reason)
for c in nonqdiis:
try:
p = xa.RTPredict(c)
data["t0"].append(round(p.get_t0(return_date=False), 4))
data["t0rate"].append(round(p.get_t0_rate(return_date=False), 2))
data["t1"].append(xa.get_rt("F" + c[2:])["current"])
data["t1rate"].append("-")
data["position"].append("-")
data["now"].append(xa.get_rt(c)["current"])
data["code"].append(c)
data["name"].append(xa.get_rt(c)["name"])
except xa.exceptions.NonAccurate as e:
print("%s cannot be predicted exactly now" % c)
print(e.reason)
df = pd.DataFrame(data)
htmlstr = (
"""<html>
<meta charset="UTF-8">
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.20/css/jquery.dataTables.css">
<script src="https://ajax.aspnetcdn.com/ajax/jQuery/jquery-3.4.1.min.js"></script>
<script type="text/javascript" charset="utf8" src="https://cdn.datatables.net/1.10.20/js/jquery.dataTables.js"></script>
<script>
$(document).ready( function () {
$('#df').DataTable({"scrollY": "88%",
"scrollCollapse": true,
"paging": false,
"fixedHeader": true
});
} );
</script>
<style>
td, th {
text-align: center;
}
#df tbody tr:hover {
background-color: #ffff99;
}
</style>"""
+ df.to_html(table_id="df", index=False)
+ "</html>"
)
with open("demo.html", "w") as f:
f.writelines([htmlstr])
| [
"kcanamgal@foxmail.com"
] | kcanamgal@foxmail.com |
e383ea3229f57026689c505f94d5e91c01c69485 | 0f6db051f895b5f4621610c9f84eb678e83629bf | /src/learn/train_peak_model.py | 1ce3a6d64474cb7d8a8f37b07a708d41f97ffe86 | [
"MIT"
] | permissive | stonewell/learn-curve | 313b18c01b0acc208390bf8589c674c7e758cdba | ae787e2f84d3b91f59257b500b11b1dd8904430e | refs/heads/main | 2022-05-25T10:38:44.238297 | 2022-04-01T17:04:12 | 2022-04-01T17:04:12 | 61,493,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,335 | py | import os
import sys
import argparse
import datetime
import logging
import json
import pathlib
import pandas as pd
import numpy as np
from learn.peak_analyze import PeakAnalyze
sys.dont_write_bytecode = True
def valid_date(s):
    """Parse a YYYYMMDD command-line string into a ``datetime.date``.

    Raises argparse.ArgumentTypeError on malformed input so argparse can
    report a clean CLI error instead of a traceback.
    """
    try:
        return datetime.datetime.strptime(s, "%Y%m%d").date()
    except ValueError:
        # Only strptime's parse failure is expected here; the original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        msg = "Not a valid date: '{0}'.".format(s)
        raise argparse.ArgumentTypeError(msg)
def parse_arguments():
    """Define and parse the command-line options for the peak-model trainer."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-d", "--debug", action="count", default=0,
        help="print debug information")
    arg_parser.add_argument(
        "-v", "--version", action="version", version='%(prog)s 1.0')
    arg_parser.add_argument(
        "-n", "--interval", type=int, default=5,
        help='days interval for training')
    arg_parser.add_argument(
        '-o', "--output", required=True, type=pathlib.Path,
        metavar='<output file>',
        help="save generated model to the file")
    arg_parser.add_argument(
        '-i', "--input", required=True, type=pathlib.Path,
        metavar='<training data directory>',
        help="training data directory for peak analysis")
    arg_parser.add_argument(
        '-t', "--validate_input", required=True, type=pathlib.Path,
        metavar='<training validating data directory>',
        help="training validating data directory for peak analysis")
    return arg_parser.parse_args()
def validate_args(args):
    """Sanity-check parsed CLI options; raise ArgumentTypeError on bad values."""
    if args.interval <= 0:
        raise argparse.ArgumentTypeError(
            'invalid training interval:{}'.format(args.interval))
    directory_checks = (
        (args.input, 'invalid training data directory:{}'),
        (args.validate_input, 'invalid training validation data directory:{}'),
    )
    for directory, message_template in directory_checks:
        if not directory.is_dir():
            raise argparse.ArgumentTypeError(message_template.format(directory))
def main():
    """CLI entry point: configure logging, load both data sets, and train."""
    args = parse_arguments()
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.DEBUG if args.debug > 0 else logging.INFO)
    logging.debug('debug level:{}'.format(args.debug))
    validate_args(args)
    features, label = load_features_label(args.input, args)
    v_features, v_label = load_features_label(args.validate_input, args)
    train_model(features, label, v_features, v_label, args)
def load_features_label(input_dir, args):
    """Collect feature windows and one-hot labels from every file in input_dir.

    Files with fewer rows than ``args.interval`` are skipped with a warning,
    since no complete window can be built from them.
    """
    all_features = []
    all_labels = []
    for training_file in input_dir.iterdir():
        training_data = load_data(training_file, args)
        if len(training_data) < args.interval:
            logging.warning('{} too few training data, need at least {}.'.format(
                training_file, args.interval))
            continue
        file_features, file_labels = build_features_and_label(training_data, args)
        all_features.extend(file_features)
        all_labels.extend(file_labels)
    return all_features, all_labels
def load_data(training_file, args):
    """Read one training CSV (first column as index); NaNs become 0.

    ``args`` is unused here but kept to match the loader call signature.
    """
    frame = pd.read_csv(training_file, index_col=0)
    return frame.fillna(0)
def build_features_and_label(training_data, args):
    """Turn a per-row DataFrame into sliding-window features and one-hot labels.

    Each window of ``args.interval`` consecutive rows (all columns except the
    trailing 'trade' column) is one sample; its label is the 'trade' value of
    the row immediately after the window. When that value is 0, the rows one
    step before/after the target are consulted so labels slightly off the
    window boundary are still learned.

    Returns ``(features, labels)`` where each label row is one-hot over
    [trade == 0, trade == 50, any other non-zero trade].
    """
    features_data = training_data[training_data.columns[:-1]].values.tolist()
    label_data = training_data['trade'].tolist()
    features = []
    label = []
    for index in range(0, len(features_data) - args.interval, 1):
        features.append(features_data[index:index + args.interval])
        l = label_data[index + args.interval]
        # If the target row has no trade, borrow a non-zero label from the
        # neighbouring rows (one step back, then one step forward).
        check_range = 1
        if l == 0:
            for t in range(check_range):
                try:
                    l1 = label_data[index + args.interval - t - 1]
                except IndexError:
                    # Past the end of the series: treat as "no trade".
                    l1 = 0
                try:
                    l2 = label_data[index + args.interval + t + 1]
                except IndexError:
                    l2 = 0
                l = l1 if l1 != 0 else l2 if l2 != 0 else 0
                if l != 0:
                    break
        label.append(l)
    # One-hot encode: column 0 = no trade, 1 = trade value 50, 2 = any other.
    label_array = np.zeros((len(label), 3), dtype=np.int8)
    for label_index, l in enumerate(label):
        l_index = l if l == 0 else 1 if l == 50 else 2
        label_array[label_index, l_index] = 1
    return features, label_array.tolist()
def train_model(features, label, v_features, v_label, args):
    """Dispatch training to the Keras RNN backend (currently the only one)."""
    train_model_keras_rnn(features, label, v_features, v_label, args)
def train_model_keras_rnn(features, label,
                          v_features, v_label,
                          args):
    """Train a stacked-LSTM 3-class classifier and checkpoint the best model.

    features/label: training windows and one-hot labels (3 classes).
    v_features/v_label: held-out validation set driving early stopping.
    args: parsed CLI options; uses args.interval (window length) and
    args.output (checkpoint path).
    """
    # Keras/TensorFlow imports are kept function-local, presumably so the
    # module can be imported without TF installed -- confirm.
    from keras.models import Sequential
    from keras.layers import LSTM, Dense, Dropout
    from keras import Input
    from tensorflow.keras import optimizers
    model = Sequential()
    # Input: interval timesteps, each with one feature vector per row.
    model.add(Input(shape=(args.interval, len(features[0][0]))))
    # use default activation to use cuDNN kernel
    model.add(LSTM(512,
                   return_sequences=True,
                   dropout=.1,
                   #activation='relu'
                   ))
    model.add(LSTM(128,
                   return_sequences=False,
                   dropout=.1,
                   #activation='relu'
                   ))
    model.add(Dense(64,
                    activation='relu'))
    model.add(Dropout(0.5))
    # Softmax over the 3 one-hot label classes.
    model.add(Dense(3,
                    activation='softmax'))
    optimizer = optimizers.Adam()
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  #loss='mean_squared_error',
                  metrics=['accuracy'])
    model.summary()
    from keras.callbacks import EarlyStopping, ModelCheckpoint
    # Create callbacks: stop after 5 epochs without val_loss improvement and
    # keep only the best full model at args.output.
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=5),
        ModelCheckpoint(args.output,
                        save_best_only=True,
                        save_weights_only=False)
    ]
    history = model.fit(features,
                        label,
                        epochs=150,
                        callbacks=callbacks,
                        validation_data=(v_features, v_label))
if __name__ == '__main__':
main()
| [
"jingnan.si@gmail.com"
] | jingnan.si@gmail.com |
23bf6fd9231c759ca6394cdab98810466382cf77 | 82a9077bcb5a90d88e0a8be7f8627af4f0844434 | /google-cloud-sdk/lib/tests/unit/surface/anthos/create_login_config_test.py | 455176b1ecbce0ed2d1131aa979b3d28b42a7223 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | piotradamczyk5/gcloud_cli | 1ae2553595e569fad6ce84af62b91a7ee5489017 | 384ece11040caadcd64d51da74e0b8491dd22ca3 | refs/heads/master | 2023-01-01T23:00:27.858583 | 2020-10-21T04:21:23 | 2020-10-21T04:21:23 | 290,238,061 | 0 | 0 | null | 2020-10-19T16:43:36 | 2020-08-25T14:31:00 | Python | UTF-8 | Python | false | false | 2,107 | py | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""anthos auth create login config tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from tests.lib import test_case
from tests.lib.surface.anthos import test_base as anthos_test_base
class CreateLoginConfigTest(anthos_test_base.AuthUnitTestBase):
  """Checks that the surface forwards flags verbatim to the auth binary."""
  def testCreateLoginConfigWithDefaults(self):
    # Only --kubeconfig given: expect exactly that flag in the binary call.
    self.Run('anthos create-login-config --kubeconfig my-config.yaml')
    self.AssertValidBinaryCall(
        env={'COBRA_SILENCE_USAGE': 'true', 'GCLOUD_AUTH_PLUGIN': 'true'},
        command_args=[
            anthos_test_base._MOCK_ANTHOS_AUTH_BINARY,
            'create-login-config',
            '--kubeconfig',
            'my-config.yaml',])
    self.AssertErrContains('Configuring Anthos authentication')
  def testCreateLoginConfigExplicit(self):
    # All optional flags given: --output and --merge-from must pass through.
    self.Run('anthos create-login-config --kubeconfig my-config.yaml '
             '--output my_output.yaml --merge-from orig-config.yaml')
    self.AssertValidBinaryCall(
        env={'COBRA_SILENCE_USAGE': 'true', 'GCLOUD_AUTH_PLUGIN': 'true'},
        command_args=[
            anthos_test_base._MOCK_ANTHOS_AUTH_BINARY,
            'create-login-config',
            '--kubeconfig',
            'my-config.yaml',
            '--output',
            'my_output.yaml',
            '--merge-from',
            'orig-config.yaml',
        ])
    self.AssertErrContains('Configuring Anthos authentication')
if __name__ == '__main__':
test_case.main()
| [
"code@bootstraponline.com"
] | code@bootstraponline.com |
4a4a579f82001e30dd5e40cfb88a7d41554d1279 | 74e3bf0160007fb1e6908879fe743b6cd74fd379 | /python/day09/weavo.py | b6dd802b26541e558a0d2cfaf6009b7dd593dce8 | [] | no_license | edutak/TIL-2 | 7a5586081af9172b1143dd4aaddef8954fe9fe81 | 4f736019883c5153b2afeb7b490014b9bd569b18 | refs/heads/master | 2023-05-08T16:48:26.283999 | 2021-06-04T06:40:39 | 2021-06-04T06:40:39 | 373,745,204 | 0 | 0 | null | 2021-06-04T06:37:17 | 2021-06-04T06:37:16 | null | UTF-8 | Python | false | false | 776 | py | class WeadbVO:
def __init__(self,city,province,tmn,tmx,date):
self.__city = city;
self.__province = province;
self.__tmn = tmn;
self.__tmx = tmx;
self.__date = date;
def __str__(self):
return '%s, %s, %.2f %.2f %s' % (self.__city,self.__province,self.__tmn,self.__tmx,self.__date);
def getCity(self):
return self.__city;
def getProvince(self):
return self.__province;
def setProvince(self, province):
self.__province = province;
def getTmn(self):
return self.__tmn;
def setTmn(self, tmn):
self.__tmn = tmn;
def getTmx(self):
return self.__tmx;
def setTmx(self, tmx):
self.__tmx = tmx;
def getDate(self):
return self.__date;
| [
"cjdauddl93@gmail.com"
] | cjdauddl93@gmail.com |
653ae4b5122947a39abcf9bcadca408111886667 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Pygame Tutorials/examples/steering/part06.py | bd8e17f8ca75e30f7b3554d7d92e86b3eb1b77c9 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:5048d8b9515d53a57667319a2cf5946943860bce3c9fe681264f90e123930e47
size 6107
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
c2d538bc309a95e5b9656e96ad3030ac4e03d3bc | fb881b00c607fbe9a2f29005965e2648958c608c | /dynamodb-boto3-backup-table.py | 718c86ee279af7fa6ad558854cb87445a1c65036 | [] | no_license | renauddahou/boto3_dynamodb_scripts | b7afd604893c3bc9f6f5b774ef4d88e356ae8d0d | 97a380b09a4488b5a8483547c70879711630964a | refs/heads/main | 2023-07-12T11:20:38.335497 | 2021-09-02T20:10:03 | 2021-09-02T20:10:03 | 416,806,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | import boto3
# Low-level DynamoDB client (region/credentials come from the environment).
client = boto3.client('dynamodb')
# Request an on-demand backup of the 'Employees' table and print the raw
# API response (backup metadata) for inspection.
response = client.create_backup(
    TableName='Employees',
    BackupName='Employees-Backup-01'
)
print(response)
"noreply@github.com"
] | renauddahou.noreply@github.com |
78b824aa0d72d6431c4d74492b5aee080e1290b6 | b005d794cfd8e3b063b08d6a266b1e07f0f0f5e9 | /src/webapp/appengine_admin/main.py | 250c885c61e10ae0f5ddfa5bbc954edd335fff42 | [] | no_license | GeoRemindMe/GeoRemindMe_Web | 593c957faa5babb3040da86d94a5d884ad4b2db3 | d441693eedb32c36fe853895110df808a9959941 | refs/heads/master | 2021-01-16T18:29:39.633445 | 2011-11-05T23:50:37 | 2011-11-05T23:50:37 | 1,841,418 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | import os
# Point Django at the local settings module before anything imports Django.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from google.appengine.dist import use_library
# Pin the App Engine-bundled Django version.
use_library('django', '1.2')
from django.conf import settings
# Touch a setting, presumably to force Django's lazy settings to load
# eagerly at startup -- confirm.
_ = settings.TEMPLATE_DIRS
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from appengine_admin.views import Admin
from appengine_admin.model_register import ModelAdmin, register
# Imported for side effects, presumably to register admin models -- confirm.
import appengine_admin.models_admin
# Route every /admin... path to the Admin handler; the two regex groups are
# passed to the handler as positional arguments.
application = webapp.WSGIApplication([
    (r'^(/admin)(.*)$', Admin),
])
def main():
    # Serve the WSGI application under the App Engine runtime.
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
"jneight@gmail.com"
] | jneight@gmail.com |
911819b86225ec2196c19e866f1cae7ac2cbe89e | bee9d96912078d68877aa53e0c96537677ec3e6a | /peakpo/ds_powdiff/DiffractionPattern.py | b8b590a3372b5900968724dcec12c537c2c9d524 | [
"Apache-2.0"
] | permissive | SHDShim/PeakPo | ce0a637b6307787dd84fd3dcb3415e752d180c32 | 4c522e147e7715bceba218de58ee185cccd2055e | refs/heads/master | 2022-06-26T11:26:45.097828 | 2022-06-19T22:03:24 | 2022-06-19T22:03:24 | 94,345,216 | 17 | 3 | null | null | null | null | UTF-8 | Python | false | false | 6,673 | py | import numpy as np
import os
import time
import datetime
from utils import writechi, readchi, make_filename
from .background import fit_bg_cheb_auto
class Pattern(object):
    """A 1-D diffraction pattern (two-theta vs intensity) with Chebyshev
    background fitting, subtraction, and temp-file caching.

    This modified from the same object in ds_* modules.
    """
    def __init__(self, filename=None):
        # Reading a file also sets self.fname; without a file the raw arrays
        # stay None until assigned externally.
        if filename is None:
            self.x_raw = None
            self.y_raw = None
        else:
            self.fname = filename
            self.read_file(filename)
        self.x_bgsub = None
        self.y_bgsub = None
        self.x_bg = None
        self.y_bg = None
        # Default Chebyshev parameters forwarded to fit_bg_cheb_auto.
        self.params_chbg = [20, 10, 20]
    def read_file(self, fname):
        """
        read a chi file and get raw xy
        """
        if fname.endswith('.chi'):
            # CHI layout: 4 header lines, then two-column (2theta, intensity).
            data = np.loadtxt(fname, skiprows=4)
            twotheta, intensity = data.T
        else:
            # NOTE(review): despite the message, only CHI is handled here.
            raise ValueError('Only support CHI, MSA, and EDS formats')
        # set file name information
        self.fname = fname
        self.x_raw = twotheta
        self.y_raw = intensity
    def _get_section(self, x, y, roi):
        """Return the (x, y) slice inside roi; fall back to the full arrays."""
        if roi[0] >= x.min() and roi[1] <= x.max():
            i_roimin = np.abs(x - roi[0]).argmin()
            i_roimax = np.abs(x - roi[1]).argmin()
            x_roi = x[i_roimin:i_roimax]
            y_roi = y[i_roimin:i_roimax]
            return x_roi, y_roi
        else:
            # Out-of-range ROI: warn on stdout and return the untrimmed data.
            print(str(datetime.datetime.now())[:-7],
                  ": Error: ROI should be smaller than the data range")
            return x, y
    def get_section(self, roi, bgsub=True):
        """
        return a section for viewing and processing
        """
        if bgsub:
            return self._get_section(self.x_bgsub, self.y_bgsub, roi)
        else:
            return self._get_section(self.x_raw, self.y_raw, roi)
    def _get_bg(self, roi, params=None, yshift=0.):
        """Fit a Chebyshev background over roi and cache bg/bgsub arrays.

        yshift is only used by the commented-out offset correction below.
        """
        if params is not None:
            self.params_chbg = params
        x, y = self._get_section(self.x_raw, self.y_raw, roi)
        t_start = time.time()
        y_bg = fit_bg_cheb_auto(x, y, self.params_chbg[0],
                                self.params_chbg[1], self.params_chbg[2])
        print(str(datetime.datetime.now())[:-7],
              ": Bgsub takes {0:.2f}s".format(time.time() - t_start))
        self.x_bg = x
        self.x_bgsub = x
        y_bgsub = y - y_bg
        """
        if y_bgsub.min() <= 0:
            y_bgsub = y_bgsub - y_bgsub.min() + yshift
        """
        self.y_bgsub = y_bgsub
        self.y_bg = y_bg
        self.roi = roi
    def subtract_bg(self, roi, params=None, yshift=10.):
        """Public wrapper around _get_bg that also logs the request."""
        print(str(datetime.datetime.now())[:-7], ": Receive BG subtraction")
        self._get_bg(roi, params=params, yshift=yshift)
    def get_raw(self):
        # Raw (unsubtracted) pattern.
        return self.x_raw, self.y_raw
    def get_background(self):
        # Fitted background curve (same data as get_bg).
        return self.x_bg, self.y_bg
    def get_bgsub(self):
        # Background-subtracted pattern.
        return self.x_bgsub, self.y_bgsub
    def get_bg(self):
        # Fitted background curve (duplicate of get_background).
        return self.x_bg, self.y_bg
    def set_bg(self, x_bg, y_bg, x_bgsub, y_bgsub, roi, bg_params):
        """Install externally computed background results (used by the
        temp-file cache in read_bg_from_tempfile)."""
        self.x_bg = x_bg
        self.y_bg = y_bg
        self.x_bgsub = x_bgsub
        self.y_bgsub = y_bgsub
        self.roi = roi
        self.params_chbg = bg_params
    def get_chbg(self, roi, params=None, chiout=False, yshift=10.):
        """
        subtract background from raw data for a roi and then store in
        chbg xy
        """
        self._get_bg(roi, params=params, yshift=yshift)
        if chiout:
            # write background file
            f_bg = os.path.splitext(self.fname)[0] + '.bg.chi'
            text = "Background\n" + "2-theta, CHEB BG:" + \
                ' '.join(map(str, self.params_chbg)) + "\n\n"
            writechi(f_bg, self.x_bgsub, self.y_bg, preheader=text)
            # write background subtracted file
            f_bgsub = os.path.splitext(self.fname)[0] + '.bgsub.chi'
            text = "Background subtracted diffraction pattern\n" + \
                "2-theta, CHEB BG:" + ' '.join(map(str, self.params_chbg)) + "\n\n"
            writechi(f_bgsub, self.x_bgsub, self.y_bgsub, preheader=text)
    def read_bg_from_tempfile(self, temp_dir=None):
        """Restore background results from temp chi files; True on success."""
        bgsub_filen, bg_filen = self.make_temp_filenames(temp_dir=temp_dir)
        if os.path.exists(bgsub_filen) and os.path.exists(bg_filen):
            roi, bg_params, x_bgsub, y_bgsub = readchi(bgsub_filen)
            __, __, x_bg, y_bg = readchi(bg_filen)
            self.set_bg(x_bg, y_bg, x_bgsub, y_bgsub, roi, bg_params)
            return True
        else:
            return False
    def make_temp_filenames(self, temp_dir=None):
        """Return (bgsub, bg) temp chi paths.

        NOTE(review): also creates temp_dir as a side effect.
        """
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        bgsub_filen = make_filename(self.fname, 'bgsub.chi',
                                    temp_dir=temp_dir)
        bg_filen = make_filename(self.fname, 'bg.chi',
                                 temp_dir=temp_dir)
        return bgsub_filen, bg_filen
    def temp_files_exist(self, temp_dir=None):
        """True if both cached background chi files already exist."""
        bgsub_filen, bg_filen = self.make_temp_filenames(temp_dir=temp_dir)
        if os.path.exists(bgsub_filen) and os.path.exists(bg_filen):
            return True
        else:
            return False
    def write_temporary_bgfiles(self, temp_dir):
        """Persist the current background results to temp chi files so a
        later session can reload them via read_bg_from_tempfile."""
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        bgsub_filen, bg_filen = self.make_temp_filenames(temp_dir=temp_dir)
        x_bgsub, y_bgsub = self.get_bgsub()
        x_bg, y_bg = self.get_bg()
        # ROI and fit parameters are stored in the chi preheader so readchi
        # can recover them together with the data.
        preheader_line0 = \
            '# BG ROI: {0: .5f}, {1: .5f} \n'.format(self.roi[0], self.roi[1])
        preheader_line1 = \
            '# BG Params: {0: d}, {1: d}, {2: d} \n'.format(
                self.params_chbg[0], self.params_chbg[1], self.params_chbg[2])
        preheader_line2 = '\n'
        writechi(bgsub_filen, x_bgsub, y_bgsub, preheader=preheader_line0 +
                 preheader_line1 + preheader_line2)
        writechi(bg_filen, x_bg, y_bg, preheader=preheader_line0 +
                 preheader_line1 + preheader_line2)
class PatternPeakPo(Pattern):
    '''
    Do not update this.
    Exist only for reading old PPSS files.
    Do not delete this, if so old PPSS cannot be read.
    This is used only for old PPSS file.
    '''
    def __init__(self):
        # NOTE(review): deliberately does NOT call Pattern.__init__; the data
        # attributes are presumably populated when an old PPSS file is
        # unpickled -- confirm before changing.
        self.color = 'white'
        self.display = False
        # X-ray wavelength, presumably in angstroms -- confirm.
        self.wavelength = 0.3344
    def get_invDsp(self):
        # Bragg's law: 1/d = 2 * sin(theta) / wavelength, with x in 2-theta
        # degrees; computed for both raw and background-subtracted arrays.
        self.invDsp_raw = np.sin(np.radians(self.x_raw / 2.)) \
            * 2. / self.wavelength
        self.invDsp_bgsub = np.sin(np.radians(self.x_bgsub / 2.)) \
            * 2. / self.wavelength
class AziPatternPeakPo(PatternPeakPo):
    """PatternPeakPo variant carrying a list of ranges (presumably azimuthal
    ROI ranges -- confirm with callers)."""

    def __init__(self):
        # Bug fix: the original skipped PatternPeakPo.__init__, leaving
        # instances without color/display/wavelength; initialize the parent
        # defaults before adding this subclass's own attribute.
        super().__init__()
        self.azi_ranges = []
| [
"SHDShim@gmail.com"
] | SHDShim@gmail.com |
f98de1eedd4d4c6fa28f8f76d28224574704c59b | 36959b56e506dbbe2d3c381cdccfe16965c14d24 | /Django/social/posts/migrations/0001_initial.py | e8fe75165e17e54f70de6e9f2a5a55528a6969fe | [] | no_license | Sathishkumar-M/Django | e2935fe0c69acb4cb39be2bc0504fd3d5619d002 | e54038ef70295274639b6207efe8e7e3939cbe36 | refs/heads/master | 2020-03-21T20:22:48.684770 | 2018-06-28T10:42:51 | 2018-06-28T10:42:51 | 139,003,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,354 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-28 15:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the posts app (auto-generated by Django 1.11.3)."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('groups', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): auto_now updates on every save; for a
                # "created_at" field auto_now_add would be usual -- confirm.
                ('created_at', models.DateTimeField(auto_now=True)),
                ('message', models.TextField()),
                # Rendered HTML counterpart of message, filled by model code.
                ('message_html', models.TextField(editable=False)),
                ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='groups.Group')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created_at'],
            },
        ),
        # A user may not post the exact same message twice.
        migrations.AlterUniqueTogether(
            name='post',
            unique_together=set([('user', 'message')]),
        ),
    ]
| [
"sathishkumar.appiness@gmail.com"
] | sathishkumar.appiness@gmail.com |
50d0989520ba6a55b188773a135e56c496e1035a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2639/60636/252547.py | c0df1380f133df5be225f8d9e3d808ba7ff728ff | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | import collections
def characterReplacement(s, k):
    """Return the length of the longest substring that can be made uniform
    by replacing at most k characters (longest repeating character
    replacement).

    Sliding window: grow the right edge; while the window cannot be fixed
    with k replacements -- i.e. (window size - count of its most frequent
    character) > k -- advance the left edge. The answer is the largest
    window ever seen.

    Fixes: the original printed intermediate debug state (the counter dict
    and a parallel count list) on every iteration, corrupting the script's
    stdout answer, and kept redundant `alls`/`count` bookkeeping.
    """
    res = 0
    left = 0
    max_freq = 0
    counts = collections.defaultdict(int)
    for right, ch in enumerate(s):
        counts[ch] += 1
        # max_freq never shrinks; the window size is monotone, which keeps
        # the result correct while avoiding a rescan of counts.
        max_freq = max(max_freq, counts[ch])
        while right - left + 1 - max_freq > k:
            counts[s[left]] -= 1
            left += 1
        res = max(res, right - left + 1)
    return res
# Read the string and the replacement budget from stdin, then print the
# length of the longest uniform substring achievable with k replacements.
s=input()
k=int(input())
print(characterReplacement(s,k))
"1069583789@qq.com"
] | 1069583789@qq.com |
fd92e6b94e9cc2cef862406f623ad9a42b1ba1ff | 6e1407eb15c2003e8b62073b719dc676d8652366 | /aiodjango/routing.py | 00f1f431ec7ae22780028bd9b4478e9764bde86a | [
"BSD-2-Clause"
] | permissive | mlavin/aiodjango | 9f65155ac85de91ba14cb820b9c1d56849ffa05c | 9989f3160453088cd7bfdfde218da143ee9f93ee | refs/heads/master | 2021-07-16T07:56:29.658857 | 2015-12-22T14:54:31 | 2015-12-22T14:54:31 | 48,146,080 | 7 | 6 | BSD-2-Clause | 2021-02-26T02:13:06 | 2015-12-17T01:55:48 | Python | UTF-8 | Python | false | false | 1,570 | py | import asyncio
import inspect
import re
from importlib import import_module
from django.conf import settings
from django.contrib.admindocs.views import extract_views_from_urlpatterns
from django.core.urlresolvers import reverse
from aiohttp.web import DynamicRoute
class DjangoRegexRoute(DynamicRoute):
    """Compatibility shim between routing frameworks.

    Wraps a Django-style regex URL pattern as an aiohttp ``DynamicRoute`` so
    the same coroutine view can be served by the aiohttp router, while URL
    reversing stays delegated to Django's ``reverse``.
    """

    def __init__(self, method, handler, name, regex, *, expect_handler=None):
        # aiohttp matches against the full request path, so make sure the
        # pattern anchors at a leading slash (Django patterns usually omit it).
        if not regex.lstrip('^').startswith('/'):
            regex = '^/' + regex.lstrip('^')
        pattern = re.compile(regex)
        super().__init__(method, handler, name, pattern, None,
                         expect_handler=expect_handler)

    def url(self, query=None, **kwargs):
        """Build the URL via Django's reverse(), then append the query string."""
        url = reverse(self.name, kwargs=kwargs)
        return self._append_query(url, query)

    def __repr__(self):
        name = "'" + self.name + "' " if self.name is not None else ""
        # Bug fix: the closing '>' was missing from the repr format string.
        return ("<DjangoRegexRoute {name}[{method}] -> {handler!r}>"
                .format(name=name, method=self.method, handler=self.handler))
def get_aio_routes(patterns=None):
    """Walk the Django URL patterns and collect a route for every
    coroutine (or generator-based) view; regular sync views are skipped."""
    if patterns is None:
        patterns = import_module(settings.ROOT_URLCONF).urlpatterns
    routes = []
    for func, regex, namespace, name in extract_views_from_urlpatterns(patterns):
        if asyncio.iscoroutinefunction(func) or inspect.isgeneratorfunction(func):
            routes.append(DjangoRegexRoute('*', func, name, regex))
    return routes
| [
"markdlavin@gmail.com"
] | markdlavin@gmail.com |
c48bc70ecb26b1f86f89404e10f91de9a4982f63 | e585c3a61b830d3c24a8cec8343d262c84c724e7 | /Figaro/page_objects/common/menu/locators.py | a64cf359cb42a79dd64da1b9e84261469740bec3 | [] | no_license | Valupiruiz/AutomationPHP | bb0728b2b6508b017c133a7d560a652033adeaf4 | 9a92634ac9f5b27e46723294f9a4cc83a1f99252 | refs/heads/master | 2023-01-18T17:27:57.819270 | 2020-11-27T15:04:49 | 2020-11-27T15:04:49 | 310,594,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | from selenium.webdriver.common.by import By
from page_objects.base_page import Locator
class MenuLocators:
    """Locators for the top navigation menu tabs.

    SOLAPA_MENU_LBL is a template: "{solapa}" is presumably substituted with
    the tab's link text (e.g. via str.format) before lookup -- confirm
    against the page-object helpers that consume it.
    """
    SOLAPA_MENU_LBL = Locator(By.LINK_TEXT, "{solapa}")
    # Legacy per-tab locators kept for reference; superseded by the
    # parameterized SOLAPA_MENU_LBL template above.
    # HOME_LBL = Locator(By.XPATH, "//a[contains(text(),'Home')]")
    # MATERIAS_LBL = Locator(By.XPATH, "//a[contains(text(),'Materias')]")
    # NOTICIAS_LBL = Locator(By.XPATH, "//a[contains(text(),'Noticias')]")
    # ADMISIONES_LBL = Locator(By.XPATH, "//a[contains(text(),'Admisiones')]")
    # INFORMES_LBL = Locator(By.XPATH, "//td[contains(@class,'tab')]//a[contains(text(),'Informes')]")
    # PRESENTISMO_LBL = Locator(By.XPATH, "//a[contains(text(),'Presentismo')]")
    # ASISTENCIA_LBL = Locator(By.XPATH, "//a[contains(text(),'Asistencia')]")
    # CONVIVENCIA_LBL = Locator(By.XPATH, "//a[contains(text(),'Convivencia')]")
    # CERTIFICADOS_LBL = Locator(By.XPATH, "//a[contains(text(),'Certificados')]")
    # BOLETINES_LBL = Locator(By.XPATH, "//a[contains(text(),'Boletines')]")
    # GRUPOS_USUARIO_LBL = Locator(By.XPATH, "//a[contains(text(),'Grupos de usuarios')]")
    # SEGUIMIENTO_DOCENTE_LBL = Locator(By.XPATH, "//a[contains(text(),'Seguimiento Docentes')]")
    # ADMINISTRACION_LBL = Locator(By.XPATH, "//a[contains(text(),'Administración')]")
    # PERMISOS_LBL = Locator(By.XPATH, "//a[contains(text(),'Permisos')]")
    # MIS_CURSOS_LBL = Locator(By.XPATH, "//a[contains(text(),'Mis cursos')]")
    # VINCULOS_EXTERNOS_LBL = Locator(By.XPATH, "//a[contains(text(),'Vínculos Externos')]")
| [
"tomasmoreira04@gmail.com"
] | tomasmoreira04@gmail.com |
0027b52908f754c4a3dd274b8b33492640656baa | 9f99485ac5479c1e6169e71d88a33c31ff591f4e | /tests/app/na_celery/test_stats_tasks.py | db6b7dcaaedad6fb138030fad167c6faa5da71c5 | [
"MIT"
] | permissive | NewAcropolis/api | b8c65554ca78ac0e87fbef46f5f2fbecb6d7700a | 34367f55d3c9ee5bf870956ffc90fd23da559b15 | refs/heads/master | 2023-08-31T09:27:02.125549 | 2023-08-26T22:15:10 | 2023-08-26T22:15:10 | 99,582,634 | 1 | 1 | MIT | 2023-08-26T22:15:11 | 2017-08-07T13:46:23 | Python | UTF-8 | Python | false | false | 4,282 | py | import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from flask import current_app
from freezegun import freeze_time
from mock import call
import requests_mock
from app.na_celery.stats_tasks import send_num_subscribers_and_social_stats
from tests.db import create_member
class WhenProcessingSendNumSubscribersTask:
    """Verifies the GA events emitted by send_num_subscribers_and_social_stats.

    Time is frozen so the "previous month" in the event names is stable, and
    the Facebook/Instagram endpoints are mocked with canned payloads.
    """
    @freeze_time("2021-01-01T10:00:00")
    def it_sends_num_subscribers_and_social_stats(self, mocker, db, db_session):
        # Two members total; one created during December (the frozen last month).
        create_member(created_at='2020-10-10T10:00:00')
        create_member(email='test2@example.com', created_at='2020-12-10T10:00:00')
        mock_send_ga_event = mocker.patch('app.na_celery.stats_tasks.send_ga_event')
        with requests_mock.mock() as r:
            # Facebook follower count is scraped from the page HTML; Instagram
            # comes from a JSON payload.
            r.get(
                current_app.config.get('FACEBOOK_URL'),
                text='<html><body><div><div>1,000</div><div>Total follows</div></div></body></html>')
            r.get(
                current_app.config.get('INSTAGRAM_URL'),
                text='{"data":{"user":{"edge_followed_by":{"count":1100,"page_info":'
                     '{"has_next_page":false,"end_cursor":null},"edges":[]}}},"status":"ok"}')
            send_num_subscribers_and_social_stats()
        assert mock_send_ga_event.call_args_list == [
            call('Number of subscribers', 'members', 'num_subscribers_december', 2),
            call('Number of new subscribers', 'members', 'num_new_subscribers_december', 1),
            call('Facebook followers count', 'social', 'num_facebook_december', 1000),
            call('Instagram followers count', 'social', 'num_instagram_december', 1100),
        ]
    @freeze_time("2021-01-01T10:00:00")
    def it_doesnt_send_instagram_stats(self, mocker, db, db_session):
        # Blank INSTAGRAM_URL config: the task should report 'url not set'
        # instead of attempting the request.
        mocker.patch.dict('app.application.config', {
            'INSTAGRAM_URL': ''
        })
        create_member(created_at='2020-10-10T10:00:00')
        create_member(email='test2@example.com', created_at='2020-12-10T10:00:00')
        mock_send_ga_event = mocker.patch('app.na_celery.stats_tasks.send_ga_event')
        with requests_mock.mock() as r:
            r.get(
                current_app.config.get('FACEBOOK_URL'),
                text='<html><body><div><div>1,000</div><div>Total follows</div></div></body></html>')
            r.get(
                current_app.config.get('INSTAGRAM_URL'),
                text='{"data":{"user":{"edge_followed_by":{"count":1100,"page_info":'
                     '{"has_next_page":false,"end_cursor":null},"edges":[]}}},"status":"ok"}')
            send_num_subscribers_and_social_stats()
        assert mock_send_ga_event.call_args_list == [
            call('Number of subscribers', 'members', 'num_subscribers_december', 2),
            call('Number of new subscribers', 'members', 'num_new_subscribers_december', 1),
            call('Facebook followers count', 'social', 'num_facebook_december', 1000),
            call('Instagram followers count', 'social', 'num_instagram_december', 'url not set'),
        ]
    @freeze_time("2020-12-01T10:00:00")
    def it_sends_num_subscribers_and_failed_social_stats(self, mocker, db, db_session):
        # Both social payloads are in unexpected formats, so the social
        # figures degrade to 'failed' while the member counts still go out.
        create_member(created_at='2020-10-10T10:00:00')
        mock_send_ga_event = mocker.patch('app.na_celery.stats_tasks.send_ga_event')
        with requests_mock.mock() as r:
            r.get(
                current_app.config.get('FACEBOOK_URL'),
                text='<html><body><div><div>1,000</div><div>Total followers</div></div></body></html>')
            r.get(
                current_app.config.get('INSTAGRAM_URL'),
                text='<html><head><meta property="og:description" content="'
                '1,100 Following, 200 Posts"/></head></html>')
            send_num_subscribers_and_social_stats()
        assert mock_send_ga_event.call_args_list == [
            call('Number of subscribers', 'members', 'num_subscribers_november', 1),
            call('Number of new subscribers', 'members', 'num_new_subscribers_november', 0),
            call('Facebook followers count', 'social', 'num_facebook_november', 'failed'),
            call('Instagram followers count', 'social', 'num_instagram_november', 'failed'),
        ]
"kenlt.uk@gmail.com"
] | kenlt.uk@gmail.com |
1719e02eb066af7f9a41f3e1065f2b42d5223eb6 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/third_party/oauthlib/oauth1/rfc5849/endpoints/authorization.py | df211a199ad7c422aa7f8c84b0131a689a7228c2 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 6,907 | py | # -*- coding: utf-8 -*-
"""oauthlib.oauth1.rfc5849.endpoints.authorization ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for signing and checking OAuth 1.0 RFC 5849 requests.
"""
from __future__ import absolute_import, unicode_literals
from oauthlib.common import Request, add_params_to_uri
from .. import errors
from .base import BaseEndpoint
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
class AuthorizationEndpoint(BaseEndpoint):
"""An endpoint responsible for letting authenticated users authorize access
to their protected resources to a client.
Typical use would be to have two views, one for displaying the authorization
form and one to process said form on submission.
The first view will want to utilize ``get_realms_and_credentials`` to fetch
requested realms and useful client credentials, such as name and
description, to be used when creating the authorization form.
During form processing you can use ``create_authorization_response`` to
validate the request, create a verifier as well as prepare the final
redirection URI used to send the user back to the client.
See :doc:`/oauth1/validator` for details on which validator methods to
implement
for this endpoint.
"""
def create_verifier(self, request, credentials):
"""Create and save a new request token.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param credentials: A dict of extra token credentials.
:returns: The verifier as a dict.
"""
verifier = {
'oauth_token': request.resource_owner_key,
'oauth_verifier': self.token_generator(),
}
verifier.update(credentials)
self.request_validator.save_verifier(request.resource_owner_key, verifier,
request)
return verifier
def create_authorization_response(self,
uri,
http_method='GET',
body=None,
headers=None,
realms=None,
credentials=None):
"""Create an authorization response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of credentials to include in the verifier.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
If the callback URI tied to the current token is "oob", a response with
a 200 status code will be returned. In this case, it may be desirable to
modify the response to better display the verifier to the client.
An example of an authorization request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AuthorizationEndpoint
>>> endpoint = AuthorizationEndpoint(your_validator)
>>> h, b, s = endpoint.create_authorization_response(
... 'https://your.provider/authorize?oauth_token=...',
... credentials={
... 'extra': 'argument',
... })
>>> h
{'Location':
'https://the.client/callback?oauth_verifier=...&extra=argument'}
>>> b
None
>>> s
302
An example of a request with an "oob" callback::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AuthorizationEndpoint
>>> endpoint = AuthorizationEndpoint(your_validator)
>>> h, b, s = endpoint.create_authorization_response(
... 'https://your.provider/authorize?foo=bar',
... credentials={
... 'extra': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_verifier=...&extra=argument'
>>> s
200
"""
request = self._create_request(
uri, http_method=http_method, body=body, headers=headers)
if not request.resource_owner_key:
raise errors.InvalidRequestError(
'Missing mandatory parameter oauth_token.')
if not self.request_validator.verify_request_token(
request.resource_owner_key, request):
raise errors.InvalidClientError()
request.realms = realms
if (request.realms and not self.request_validator.verify_realms(
request.resource_owner_key, request.realms, request)):
raise errors.InvalidRequestError(
description=('User granted access to realms outside of '
'what the client may request.'))
verifier = self.create_verifier(request, credentials or {})
redirect_uri = self.request_validator.get_redirect_uri(
request.resource_owner_key, request)
if redirect_uri == 'oob':
response_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
response_body = urlencode(verifier)
return response_headers, response_body, 200
else:
populated_redirect = add_params_to_uri(redirect_uri, verifier.items())
return {'Location': populated_redirect}, None, 302
def get_realms_and_credentials(self,
uri,
http_method='GET',
body=None,
headers=None):
"""Fetch realms and credentials for the presented request token.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:returns: A tuple of 2 elements.
1. A list of request realms.
2. A dict of credentials which may be useful in creating the
authorization form.
"""
request = self._create_request(
uri, http_method=http_method, body=body, headers=headers)
if not self.request_validator.verify_request_token(
request.resource_owner_key, request):
raise errors.InvalidClientError()
realms = self.request_validator.get_realms(request.resource_owner_key,
request)
return realms, {'resource_owner_key': request.resource_owner_key}
| [
"jonathang132298@gmail.com"
] | jonathang132298@gmail.com |
5f50cc960cb9a4c2fba3b5674ec717c3cdccc980 | 670f4ba8ded99b420c3454c6ae35789667880cc8 | /tobiko/tests/functional/shell/sh/test_execute.py | 6a8685be113b2165a32e615e5bb5013e091188ba | [
"Apache-2.0"
] | permissive | FedericoRessi/tobiko | 892db522198ab48380892138459d801c4bd00efa | ce2a8734f8b4203ec38078207297062263c49f6f | refs/heads/master | 2022-07-26T22:52:10.273883 | 2022-07-20T20:04:43 | 2022-07-20T20:04:43 | 145,856,925 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,929 | py | # Copyright (c) 2019 Red Hat, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import os
import typing
import testtools
import tobiko
from tobiko import config
from tobiko.openstack import keystone
from tobiko.openstack import stacks
from tobiko.shell import sh
from tobiko.shell import ssh
CONF = config.CONF
SSH_EXPECTED_SHELL = None
LOCAL_EXPECTED_SHELL = '/bin/sh -c'
class ExecuteTest(testtools.TestCase):
@property
def expected_shell(self) -> typing.Optional[str]:
if ssh.ssh_proxy_client() is None:
return LOCAL_EXPECTED_SHELL
else:
return SSH_EXPECTED_SHELL
def test_succeed(self,
command: sh.ShellCommandType = 'true',
stdin: str = None,
stdout: str = None,
stderr: str = None,
expect_exit_status: typing.Optional[int] = 0,
**kwargs):
process = self.execute(command=command,
stdin=stdin,
stdout=bool(stdout),
stderr=bool(stderr),
expect_exit_status=expect_exit_status,
**kwargs)
self.assertEqual(self.expected_command(command), process.command)
if stdin:
self.assertEqual(stdin, str(process.stdin))
else:
self.assertIsNone(process.stdin)
if stdout:
self.assertEqual(stdout, str(process.stdout))
else:
self.assertIsNone(process.stdout)
if stderr:
self.assertEqual(stderr, str(process.stderr))
else:
self.assertIsNone(process.stderr)
if expect_exit_status is not None:
self.assertEqual(0, process.exit_status)
def test_succeed_with_command_list(self):
self.test_succeed(['echo', 'something'],
stdout='something\n')
def test_succeed_reading_from_stdout(self):
self.test_succeed('echo something',
stdout='something\n')
def test_succeed_reading_from_stderr(self):
self.test_succeed('echo something >&2',
stderr='something\n')
def test_succeed_writing_to_stdin(self):
self.test_succeed('cat',
stdin='some input\n',
stdout='some input\n')
def test_succeed_with_timeout(self):
self.test_succeed(timeout=30.)
def test_succeed_with_no_exit_status(self):
self.test_succeed(command='false', expect_exit_status=None)
def test_succeed_with_current_dir(self):
temp_file = self.make_temporary()
self.execute(command=f"echo '{self.id()}' > '{temp_file}'")
self.test_succeed(command=f"cat './{os.path.basename(temp_file)}'",
current_dir=os.path.dirname(temp_file),
stdout=f"{self.id()}\n")
def test_fails(self, command='false', exit_status=None, stdin=None,
stdout=None, stderr=None, expect_exit_status=0,
**kwargs):
ex = self.assertRaises(sh.ShellCommandFailed,
self.execute,
command=command,
expect_exit_status=expect_exit_status,
stdin=stdin,
stdout=bool(stdout),
stderr=bool(stderr),
**kwargs)
self.assertEqual(self.expected_command(command), ex.command)
if stdin:
self.assertEqual(stdin, ex.stdin)
else:
self.assertIsNone(ex.stdin)
if stdout:
self.assertEqual(stdout, ex.stdout)
else:
self.assertIsNone(ex.stdout)
if stderr:
self.assertEqual(stderr, ex.stderr)
else:
self.assertIsNone(ex.stderr)
if exit_status is not None:
self.assertEqual(exit_status, ex.exit_status)
else:
self.assertTrue(ex.exit_status)
def test_fails_getting_exit_status(self):
self.test_fails('exit 15', exit_status=15)
def test_fails_reading_from_stdout(self):
self.test_fails(command='echo something && false',
stdout='something\n')
def test_fails_reading_from_stderr(self):
self.test_fails(command='echo something >&2 && false',
stderr='something\n')
def test_fails_writing_to_stdin(self):
self.test_fails('cat && false',
stdin='some input\n',
stdout='some input\n')
def test_fails_with_check_exit_status(self):
self.test_fails(command='true', expect_exit_status=1, exit_status=0)
def test_timeout_expires(self, command='sleep 10', timeout=5., stdin=None,
stdout=None, stderr=None, **kwargs):
ex = self.assertRaises(sh.ShellTimeoutExpired,
self.execute,
command=command,
timeout=timeout,
stdin=stdin,
stdout=bool(stdout),
stderr=bool(stderr),
**kwargs)
self.assertEqual(self.expected_command(command), ex.command)
if stdin:
self.assertTrue(stdin.startswith(ex.stdin))
else:
self.assertIsNone(ex.stdin)
if stdout:
self.assertTrue(stdout.startswith(ex.stdout))
else:
self.assertIsNone(ex.stdout)
if stderr:
self.assertTrue(stderr.startswith(ex.stderr))
else:
self.assertIsNone(ex.stderr)
self.assertEqual(timeout, ex.timeout)
def make_temporary(self,
directory=False,
add_cleanup=True) -> str:
command = sh.shell_command('mktemp')
if directory:
command += '-d'
temporary_path = self.execute(command=command).stdout.strip()
if add_cleanup:
self.addCleanup(sh.execute, f"rm -fR '{temporary_path}'")
return temporary_path
def execute(self, **kwargs):
return sh.execute(**kwargs)
def expected_command(self, command):
command = sh.shell_command(command)
if self.expected_shell is not None:
command = sh.shell_command(self.expected_shell) + [str(command)]
return str(command)
class LocalExecuteTest(ExecuteTest):
expected_shell = LOCAL_EXPECTED_SHELL
def execute(self, **kwargs):
return sh.local_execute(**kwargs)
@keystone.skip_unless_has_keystone_credentials()
class SSHExecuteTest(ExecuteTest):
expected_shell = SSH_EXPECTED_SHELL
server_stack = tobiko.required_fixture(
stacks.UbuntuMinimalServerStackFixture)
@property
def ssh_client(self):
return self.server_stack.ssh_client
def execute(self, **kwargs):
return sh.ssh_execute(ssh_client=self.ssh_client, **kwargs)
@keystone.skip_unless_has_keystone_credentials()
class CirrosSSHExecuteTest(SSHExecuteTest):
server_stack = tobiko.required_fixture(
stacks.CirrosServerStackFixture)
| [
"fressi@redhat.com"
] | fressi@redhat.com |
2d294b34dda835cf074d3b465100db62976b0c34 | da7fd7d45140a244809b81fa7ede3cbe58b82c8b | /azure-kusto-data/azure/kusto/data/aio/_models.py | fab3892a573bd9cc9ffc289837928129719fce0d | [
"MIT"
] | permissive | Azure/azure-kusto-python | 61ba3a1e78de8b1872dc6cb7d2d3799913b2a154 | 59e263b17716b1499e596d667c1137598b98aac0 | refs/heads/master | 2023-08-31T13:52:27.628079 | 2023-08-31T06:41:06 | 2023-08-31T06:41:06 | 108,257,720 | 176 | 107 | MIT | 2023-09-06T18:14:56 | 2017-10-25T10:55:44 | Python | UTF-8 | Python | false | false | 590 | py | from typing import AsyncIterator
from azure.kusto.data._models import KustoResultRow, BaseStreamingKustoResultTable
class KustoStreamingResultTable(BaseStreamingKustoResultTable):
"""Async Iterator over a Kusto result table."""
async def __anext__(self) -> KustoResultRow:
try:
row = await self.raw_rows.__anext__()
except StopAsyncIteration:
self.finished = True
raise
self.row_count += 1
return KustoResultRow(self.columns, row)
def __aiter__(self) -> AsyncIterator[KustoResultRow]:
return self
| [
"noreply@github.com"
] | Azure.noreply@github.com |
3747d37e151d236d260c992a23eda35e08cf136e | 6a1070b89cd0342d0b6ca3a0bda86b538ce554d0 | /ibis/impala/tests/test_connection_pool.py | 043459754ce60a9bbc602df3b9224a3abddbb74a | [
"Apache-2.0"
] | permissive | deepfield/ibis | d115013f602378a808b8d2000c27e94d9f3eb115 | 1b4e276a9404ad5e8025918abd28bc04dc9ff1bb | refs/heads/master | 2021-07-10T02:31:59.900362 | 2019-05-21T14:25:31 | 2019-05-21T14:25:31 | 64,854,231 | 0 | 1 | Apache-2.0 | 2021-05-07T17:41:58 | 2016-08-03T14:51:03 | Python | UTF-8 | Python | false | false | 681 | py | import pytest
import ibis
pytest.importorskip('sqlalchemy')
pytest.importorskip('impala.dbapi')
pytestmark = pytest.mark.impala
def test_connection_pool_size(hdfs, env, test_data_db):
client = ibis.impala.connect(
port=env.impala_port,
hdfs_client=hdfs,
host=env.impala_host,
database=test_data_db,
)
assert len(client.con.connection_pool) == 1
def test_connection_pool_size_after_close(hdfs, env, test_data_db):
client = ibis.impala.connect(
port=env.impala_port,
hdfs_client=hdfs,
host=env.impala_host,
database=test_data_db,
)
client.close()
assert not client.con.connection_pool
| [
"cpcloud@gmail.com"
] | cpcloud@gmail.com |
f38f48ad2b4f79421fb37be2f69f95435862c7d9 | 5e350a7f147a52a72ca3a859a1df5b3acd710952 | /service/migrations/0008_appointment_offset.py | 2c7f26a1ec1b300e952c114f0d09ae31c015b285 | [
"MIT"
] | permissive | jwoolnt/Opal-Health | 1330389f8698f85ead76e22aff094e5e1b482b90 | ebe0e4c88157ddbe8e56afcc43e4097ca8c4bd49 | refs/heads/master | 2023-07-05T22:07:28.119362 | 2021-03-17T13:27:33 | 2021-03-17T13:27:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # Generated by Django 3.1.3 on 2020-11-12 19:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('service', '0007_auto_20201112_1800'),
]
operations = [
migrations.AddField(
model_name='appointment',
name='offset',
field=models.CharField(default='+00:00', max_length=6),
),
]
| [
"you@example.com"
] | you@example.com |
29d409c9d7bf4680ad1ee7ea08d6f17b97a1e0dc | b0814b43440a36c9998924c9fe05f335302a2717 | /venv/lib/python2.7/site-packages/traits/tests/test_special_event_handlers.py | ce1a437d5eed37021e10887ac40ed65986d7eed6 | [
"MIT"
] | permissive | nagyistge/electrode-gui | 0b47324ce8c61ffb54c24c400aee85f16fd79c7a | 6d89c78ea61935042ead5df5e1474101df3557eb | refs/heads/master | 2021-06-03T22:47:30.329355 | 2016-09-13T19:43:31 | 2016-09-13T19:43:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | from traits.testing.unittest_tools import unittest
from traits.api import Any, HasStrictTraits, Str
class TestSpecialEvent(unittest.TestCase):
""" Test demonstrating special change events using the 'event' metadata.
"""
def setUp(self):
self.change_events = []
self.foo = Foo(test=self)
def test_events(self):
self.foo.val = 'CHANGE'
values = ['CHANGE']
self.failUnlessEqual(self.change_events, values)
def test_instance_events(self):
foo = self.foo
foo.add_trait('val2', Str(event='the_trait'))
foo.val2 = 'CHANGE2'
values = ['CHANGE2']
self.failUnlessEqual(self.change_events, values)
class Foo(HasStrictTraits):
val = Str(event='the_trait')
test = Any(None)
def _the_trait_changed(self, new):
if self.test is not None:
self.test.change_events.append(new)
| [
"xavierislam@gmail.com"
] | xavierislam@gmail.com |
eaab8d8267b81b5a50c8a49ee7586f248350f51c | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /other_thing_and_part/last_week.py | 48024b96c47690f8612c0e49e007095cc1d25708 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
#! /usr/bin/env python
def last_hand(str_arg):
life_and_company(str_arg)
print('go_little_year_above_big_hand')
def life_and_company(str_arg):
print(str_arg)
if __name__ == '__main__':
last_hand('world')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
4c86f506f6232048e2aabfdd346c4a31df66ce7d | ddf7d8f996a0cf66b0e083e0557305b3be4619e5 | /myJunkYard/myTutorials/Python/ciscoPythonClass/day01/print.py | a43916e1aba4c253e31c25065882c796758be863 | [] | no_license | archerImagine/myNewJunk | 8fab3e6ada03eee3aebb5c712d50bcfb38bf48b0 | 42fff352f6057f84ab8c81f1debc149881c1e49f | refs/heads/master | 2020-06-16T12:22:30.590672 | 2016-11-29T17:07:23 | 2016-11-29T17:07:23 | 75,103,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | #!/usr/bin/env python
from sys import stderr
print "Hello World",10,20,"AnotherString"
print "This is second Line"
print >>stderr, "This is error"
#python print.py >out, redirects to stderr | [
"animeshbhadra@Animeshs-MacBook-Pro.local"
] | animeshbhadra@Animeshs-MacBook-Pro.local |
bf03021e93bee7e9aa1a4c276f8d7c6ac90b041f | 72a3f41a94202d6d378c222c5cfa9e68155109bb | /selfika/trunk/widgets/set_media.py | 58b393e841d30d9fe4f6aca49b42758442b5c28c | [] | no_license | vakhov/python-django-projects | c312b8bcd94aa448a2678c156ff4936e4a68f668 | 6f296aa75d7692eb5dcb68ef4ce20cadee9dc9e6 | refs/heads/master | 2021-01-17T12:12:56.730072 | 2012-07-25T16:40:45 | 2012-07-25T16:40:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | # -*- coding: utf-8 -*-
from django.db import models
from models import Widget
from grouping import create_group, add2group
create_group('media', 'Мультимедиа')
@add2group('Изображение', 'media')
class SimpleImage(Widget):
image = models.ImageField("Изображение", upload_to='media_widgets/image')
alt = models.CharField(max_length=255, blank=True, default="")
title = models.CharField(max_length=255, blank=True, default="")
link = models.CharField("Ссылка (необязательно)", max_length=255, blank=True, default="")
#@add2group('Видео', 'media')
#class SimpleVideo(Widget):
# video = models.FileField("Видео", upload_to='media_widgets/video')
# alt = models.CharField(max_length=255, blank=True, default="")
# title = models.CharField(max_length=255, blank=True, default="")
| [
"succubi@succubi-Inspiron-1501.(none)"
] | succubi@succubi-Inspiron-1501.(none) |
407cb591686cc57f8fe85bcbe48db25a1b164985 | 90a2d0bed5d9eeb6b56c7ac96cc5fbee79dc4c5e | /.history/adding_20210405134642.py | b024a9f73366483777d1107dfb968b7b69e78d48 | [] | no_license | KustomApe/dev | 2d495e22363707b15a22860a773dac6c463903ee | a936f5c3b0928eaa2efaf28c6be8cacc17c3ecb3 | refs/heads/master | 2023-04-28T11:20:03.056953 | 2023-04-07T17:43:40 | 2023-04-07T17:43:40 | 138,429,111 | 1 | 0 | null | 2023-04-25T19:26:09 | 2018-06-23T19:47:23 | Python | UTF-8 | Python | false | false | 499 | py | n = 1
t = 0
# 1から5までの足し算をループを使って実装する
# while True:
# if n == 6:
# print(t)
# break
# else:
# t = t + n
# n = n + 1
# 1から10までの足し算をループを使って実装する
# while True:
# if n >= 11:
# print(t)
# break
# else:
# t = t + n
# n = n + 1
# 1から10までの奇数の合計値を計算する
n = 1
t = 0
while True:
if n <= 11:
tester = n + 1 | [
"kustomape@gmail.com"
] | kustomape@gmail.com |
fa4d6cbfa01ca1eaa307be9dd1d48a72df5366c4 | f416ab3adfb5c641dc84022f918df43985c19a09 | /problems/kattis/squarepeg/sol.py | fe44506a9020cf78270930365d6833c556f2478b | [] | no_license | NicoKNL/coding-problems | a4656e8423e8c7f54be1b9015a9502864f0b13a5 | 4c8c8d5da3cdf74aefcfad4e82066c4a4beb8c06 | refs/heads/master | 2023-07-26T02:00:35.834440 | 2023-07-11T22:47:13 | 2023-07-11T22:47:13 | 160,269,601 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from math import sqrt
if __name__ == "__main__":
L, R = map(int, input().split())
diagonal = sqrt(L * L + L * L)
if diagonal <= 2 * R:
print("fits")
else:
print("nope")
| [
"klaassen.nico@gmail.com"
] | klaassen.nico@gmail.com |
8937a9f52041c3813031738685edc0154b898fff | 02e930875e95713a387c60b7de816a3a354ceb3d | /bids/variables/tests/test_variables.py | c45eae2e84128fd3b477b5ef3604f43a45ffa328 | [
"MIT"
] | permissive | Islast/pybids | 6a7899cbef03ec3579a21e1cbe85deb5d180ca1e | 3e80e617e6bd6258e027c9937ccedafe1c8e6b14 | refs/heads/master | 2020-03-23T22:00:14.970425 | 2018-07-24T14:14:08 | 2018-07-24T14:14:08 | 142,146,832 | 0 | 0 | MIT | 2018-07-24T11:06:51 | 2018-07-24T11:06:51 | null | UTF-8 | Python | false | false | 6,023 | py | from bids.grabbids import BIDSLayout
import pytest
from os.path import join
from bids.tests import get_test_data_path
from bids.variables import (merge_variables, DenseRunVariable, SimpleVariable,
load_variables)
from bids.variables.entities import RunInfo
import numpy as np
import pandas as pd
import uuid
def generate_DEV(name='test', sr=20, duration=480):
n = duration * sr
values = np.random.normal(size=n)
ent_names = ['task', 'run', 'session', 'subject']
entities = {e: uuid.uuid4().hex for e in ent_names}
image = uuid.uuid4().hex + '.nii.gz'
run_info = RunInfo(entities, duration, 2, image)
return DenseRunVariable('test', values, run_info, 'dummy', sr)
@pytest.fixture
def layout1():
path = join(get_test_data_path(), 'ds005')
layout = BIDSLayout(path, exclude='derivatives/')
return layout
@pytest.fixture(scope="module")
def layout2():
path = join(get_test_data_path(), '7t_trt')
layout = BIDSLayout(path)
return layout
def test_dense_event_variable_init():
dev = generate_DEV()
assert dev.sampling_rate == 20
assert dev.run_info[0].duration == 480
assert dev.source == 'dummy'
assert len(dev.values) == len(dev.index)
def test_dense_event_variable_resample():
dev = generate_DEV()
dev2 = dev.clone().resample(sampling_rate=40)
assert len(dev2.values) == len(dev2.index)
assert len(dev2.values) == 2 * len(dev.values)
def test_merge_wrapper():
dev = generate_DEV()
data = pd.DataFrame({'amplitude': [4, 3, 2, 5]})
sev = SimpleVariable('simple', data, 'dummy')
# Should break if asked to merge different classes
with pytest.raises(ValueError) as e:
merge_variables([dev, sev])
assert "Variables of different classes" in str(e)
def test_sparse_run_variable_to_dense(layout1):
index = load_variables(layout1, types='events', scan_length=480)
runs = index.get_nodes('run', {'subject': ['01', '02']})
for i, run in enumerate(runs):
var = run.variables['RT']
dense = var.to_dense(20)
# Check that all unique values are identical
sparse_vals = set(np.unique(var.values.values)) | {0}
dense_vals = set(np.unique(dense.values.values))
assert sparse_vals == dense_vals
assert len(dense.values) > len(var.values)
assert isinstance(dense, DenseRunVariable)
assert dense.values.shape == (9600, 1)
assert len(dense.run_info) == len(var.run_info)
assert dense.source == 'events'
def test_merge_densified_variables(layout1):
SR = 10
dataset = load_variables(layout1, types='events', scan_length=480)
runs = dataset.get_nodes('run')
vars_ = [r.variables['RT'].to_dense(SR) for r in runs]
dense = merge_variables(vars_)
assert isinstance(dense, DenseRunVariable)
n_rows = 480 * SR
assert dense.values.shape == (len(runs) * n_rows, 1)
for i in range(len(runs)):
onset = i * n_rows
offset = onset + n_rows
run_vals = vars_[i].values
dense_vals = dense.values.iloc[onset:offset].reset_index(drop=True)
assert dense_vals.equals(run_vals)
def test_densify_merged_variables(layout1):
SR = 10
dataset = load_variables(layout1, types='events', scan_length=480)
runs = dataset.get_nodes('run')
vars_ = [r.variables['RT'] for r in runs]
var = merge_variables(vars_)
dense = var.to_dense(SR)
assert isinstance(dense, DenseRunVariable)
n_rows = 480 * SR
assert dense.values.shape == (len(runs) * n_rows, 1)
for i in range(len(runs)):
onset = i * n_rows
offset = onset + n_rows
run_vals = vars_[i].to_dense(SR).values
dense_vals = dense.values.iloc[onset:offset].reset_index(drop=True)
assert dense_vals.equals(run_vals)
def test_merge_simple_variables(layout2):
index = load_variables(layout2, types='sessions')
subjects = index.get_nodes('subject')
variables = [s.variables['panas_sad'] for s in subjects]
n_rows = sum([len(c.values) for c in variables])
merged = merge_variables(variables)
assert len(merged.values) == n_rows
assert set(merged.index.columns) == set(variables[0].index.columns)
assert variables[3].values.iloc[1] == merged.values.iloc[7]
def test_merge_sparse_run_variables(layout1):
dataset = load_variables(layout1, types='events', scan_length=480)
runs = dataset.get_nodes('run')
variables = [r.variables['RT'] for r in runs]
n_rows = sum([len(c.values) for c in variables])
merged = merge_variables(variables)
assert len(merged.values) == n_rows
assert set(merged.index.columns) == set(variables[0].index.columns)
def test_merge_dense_run_variables(layout2):
variables = [generate_DEV() for i in range(20)]
variables += [generate_DEV(duration=400) for i in range(8)]
n_rows = sum([len(c.values) for c in variables])
merged = merge_variables(variables)
assert len(merged.values) == n_rows
assert set(merged.index.columns) == set(variables[0].index.columns)
def test_simple_variable_to_df(layout1):
pass
def test_sparse_run_variable_to_df(layout1):
pass
def test_dense_run_variable_to_df(layout2):
pass
def test_filter_simple_variable(layout2):
dataset = load_variables(layout2, types=['scans'])
sessions = dataset.get_nodes('session')
variables = [s.variables['surroundings'] for s in sessions]
merged = merge_variables(variables)
assert merged.to_df().shape == (60, 9)
filt = merged.filter({'acq': 'fullbrain'})
assert filt.to_df().shape == (40, 9)
flt1 = merged.filter({'acq': 'fullbrain', 'subject': ['01', '02']}).to_df()
assert flt1.shape == (8, 9)
flt2 = merged.filter(query='acq=="fullbrain" and subject in ["01", "02"]')
flt2 = flt2.to_df()
assert flt1.equals(flt2)
assert merged.filter({'nonexistent': 2}, strict=True) is None
merged.filter({'acq': 'fullbrain'}, inplace=True)
assert merged.to_df().shape == (40, 9)
| [
"tyarkoni@gmail.com"
] | tyarkoni@gmail.com |
4a35e5d1186ac7712235416eb1cf34eb9202f1a6 | d274e22b1cc5d546855fe46b089b13cfe2f4047c | /may2020/solutions/day03_RansomNote.py | 5957935a487e8bfd84bc16b19821e49b9a0ab508 | [] | no_license | varunkumar032/lockdown-leetcode | ca6b7a8133033110680dd226c897dd8a1482682b | 15a72a53be9005eca816f018cb1b244f2aa4cdfb | refs/heads/master | 2023-06-30T08:31:54.323747 | 2021-07-12T11:29:59 | 2021-07-12T11:29:59 | 260,616,280 | 0 | 0 | null | 2021-05-06T10:24:48 | 2020-05-02T04:52:37 | Python | UTF-8 | Python | false | false | 910 | py | # Given an arbitrary ransom note string and another string containing letters from all the magazines, write a function that will return true if the ransom note can be constructed from the magazines ; otherwise, it will return false.
# Each letter in the magazine string can only be used once in your ransom note.
# Note:
# You may assume that both strings contain only lowercase letters.
def canConstruct(ransomNote, magazine):
ransomNoteSet = set(ransomNote)
for item in ransomNoteSet:
if ransomNote.count(item)>magazine.count(item):
return False
return True
# Alternate solution:
# from collections import Counter
# def canConstruct(ransomNote, magazine):
# ransomNoteCount = Counter(ransomNote)
# magazineCount = Counter(magazine)
# for key in ransomNoteCount:
# if ransomNoteCount[key]>magazineCount[key]:
# return False
# return True
| [
"varunkumar032@gmail.com"
] | varunkumar032@gmail.com |
c8be73cd95d1fdc43b864eee4142eea6870b1356 | f7055e71c763d79abcef7283617df6d811375346 | /parlaskupine/utils_.py | 89134af1aa8bc921593cf9764abd7380d3de77b0 | [
"Unlicense"
] | permissive | VesterDe/parlalize | 8ec5aec20f3d2ae72e5485130031ba868554cdab | b725fe4b55b95f2ad3505aa70dac2474269ea3da | refs/heads/master | 2020-09-09T23:04:20.450691 | 2017-06-15T14:26:42 | 2017-06-15T14:26:42 | 94,447,286 | 1 | 0 | null | 2017-06-15T14:25:16 | 2017-06-15T14:25:16 | null | UTF-8 | Python | false | false | 611 | py | from operator import itemgetter
from .models import Organization
from datetime import datetime
from parlalize.settings import API_DATE_FORMAT
from django.http import JsonResponse
def getPgDataAPI(request, id_parladata, date_=None):
if not date_:
date_ = datetime.now().strftime(API_DATE_FORMAT)
org = Organization.objects.filter(id_parladata=id_parladata)
if org:
return JsonResponse(org[0].getOrganizationData())
else:
return JsonResponse({
'id': id_parladata,
'name': "unknown",
'acronym': "unknown",
})
| [
"tomazkunst@gmail.com"
] | tomazkunst@gmail.com |
3289ce4ce8028ae5827693a4a4d62674fdb8867e | a3af97d1110a7e60c53932e5cfbc0831ecb3ec78 | /projetos/urls.py | 35c61b22247a784fb59597c2470bfb48719eb06c | [] | no_license | guedesemerson/navedex | 7c33b291a72caeab29891138d3268702f7dc2dd7 | 0dc2fbaec4f7a7f7b9203a2bb11e5e0147355800 | refs/heads/master | 2022-12-03T12:49:55.490910 | 2020-08-16T20:43:13 | 2020-08-16T20:43:13 | 288,010,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | from django.urls import path
from .api.viewsets import (Index,
Store,
Delete,
Update,
Retrieve)
urlpatterns = [
path('index', Index.as_view()),
path('store', Store.as_view()),
path('show/<int:id>', Retrieve.as_view()),
path('delete/<int:id>', Delete.as_view()),
path('update/<int:id>', Update.as_view()),
] | [
"guedes.emerson@hotmail.com"
] | guedes.emerson@hotmail.com |
c0e93c7a79a1902452e4c9ddcc8ce49f3d5f8f9a | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/b06f7eade2cb0285fb36ade60484d980cb6f2d68def364bdccaea8075ac242f5/xml/parsers/expat/model.py | 540a8828ad5f498d0fa72cf68e8e707fda7dd85d | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # encoding: utf-8
# module xml.parsers.expat.model calls itself pyexpat.model
# from C:\Users\Doly\Anaconda3\lib\site-packages\sklearn\linear_model\cd_fast.cp37-win_amd64.pyd
# by generator 1.147
""" Constants used to interpret content model information. """
# no imports
# Variables with simple values
XML_CQUANT_NONE = 0
XML_CQUANT_OPT = 1
XML_CQUANT_PLUS = 3
XML_CQUANT_REP = 2
XML_CTYPE_ANY = 2
XML_CTYPE_CHOICE = 5
XML_CTYPE_EMPTY = 1
XML_CTYPE_MIXED = 3
XML_CTYPE_NAME = 4
XML_CTYPE_SEQ = 6
__loader__ = None
__spec__ = None
# no functions
# no classes
| [
"qinkunpeng2015@163.com"
] | qinkunpeng2015@163.com |
49a25dceb73a4332be16ac355cb8064b302b6927 | cb4e07b2a5dd30804ce428ec84d9e9f77709fcd5 | /swea/D4/5678. [Professional] 팰린드롬.py | 9a27a11b5210bc71a6372ee402db6ea3af5d54d6 | [] | no_license | jbsam2/algo_problem | 141c17003e88a69afdeea93a723e7f27c4626fdc | 18f2cab5a9af2dec57b7fd6f8218badd7de822e4 | refs/heads/master | 2023-05-18T10:03:00.408300 | 2021-06-02T10:36:50 | 2021-06-02T10:36:50 | 282,104,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | for t in range(int(input())):
s=input();r=1
for i in range(1,len(s)+1):
for j in range(len(s)-i+1):
p=s[j:j+i]
if p==p[::-1]:r=max(r,i)
print(f'#{t+1}',r) | [
"kbsam2@gmail.com"
] | kbsam2@gmail.com |
3bcca1c4065ab91c564c8c4f32a5519e832278be | fc72eba186256fa8b7f2f6b27b802af6bcd5c5c3 | /patterns/5.py | 422e82dad5bb7c5c5633c444796aca4647bfcda2 | [
"MIT"
] | permissive | OmnesRes/GRIMMER | 8d33878cf00d9e241cca4c52f603e4250478534f | 173c99ebdb6a9edb1242d24a791d0c5d778ff643 | refs/heads/master | 2021-01-21T17:10:23.159315 | 2017-04-15T16:38:09 | 2017-04-15T16:38:09 | 66,272,473 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | pattern_zero=[0.0, 0.16, 0.24, 0.4, 0.56, 0.64, 0.8, 0.96]
pattern_odd=[0.04, 0.2, 0.36, 0.44, 0.6, 0.76, 0.84]
pattern_even=[0.0, 0.16, 0.24, 0.4, 0.56, 0.64, 0.8, 0.96]
averages_even={0.0: [0.0], 0.8: [0.0], 0.4: [0.0], 0.96: [0.8, 0.2], 0.24: [0.6, 0.4], 0.16: [0.2, 0.8], 0.56: [0.8, 0.2], 0.64: [0.6, 0.4]}
averages_odd={0.36: [0.8, 0.2], 0.2: [0.0], 0.44: [0.6, 0.4], 0.6: [0.0], 0.76: [0.2, 0.8], 0.84: [0.6, 0.4], 0.04: [0.6, 0.4]} | [
"jordananaya@gmail.com"
] | jordananaya@gmail.com |
3aca9655153ffcd55b21dee39c568f740c904512 | 2827d7a837eb29c3cb07793ab6d3d5a753e18669 | /alipay/aop/api/request/KoubeiTradeItemorderBuyRequest.py | 037cc2e3a3272dc13f6b546ae46b7488e413434a | [
"Apache-2.0"
] | permissive | shaobenbin/alipay-sdk-python | 22e809b8f5096bec57d2bb25414f64bdc87fa8b3 | 5232ad74dff2e8a6e0e7646ab3318feefa07a37d | refs/heads/master | 2020-03-21T04:51:39.935692 | 2018-06-21T07:03:31 | 2018-06-21T07:03:31 | 138,131,022 | 0 | 0 | null | 2018-06-21T06:50:24 | 2018-06-21T06:50:24 | null | UTF-8 | Python | false | false | 3,936 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiTradeItemorderBuyModel import KoubeiTradeItemorderBuyModel
class KoubeiTradeItemorderBuyRequest(object):
    """Request wrapper for the ``koubei.trade.itemorder.buy`` gateway API.

    Holds the business payload (``biz_model``/``biz_content``) plus the
    common gateway fields (version, terminal info, callback URLs, custom
    ``udf_params``) and serialises them with :meth:`get_params`.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model object or a plain dict to be converted.
        if isinstance(value, KoubeiTradeItemorderBuyModel):
            self._biz_content = value
        else:
            self._biz_content = KoubeiTradeItemorderBuyModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        # Bug fix: this getter previously returned ``self._notify_url``, so a
        # configured return_url could never be read back (and get_params()
        # silently sent the notify URL under the 'return_url' key).
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values (SDK-wide convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach one extra text parameter, creating the dict on first use."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Build the flat parameter dict sent to the gateway (set fields only)."""
        params = dict()
        params[P_METHOD] = 'koubei.trade.itemorder.buy'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """No file parameters for this request type; always empty."""
        multipart_params = dict()
        return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
de4ac45769abdafc96febca8486bb551b87659b0 | 412f4a6a5853486515136e1dc2fb2a98b1de8b78 | /flaskthreads.py | c05ead2c625c7877ef02c7a494e5a4c814a631e4 | [] | no_license | zwlyn/test | a43fca97af377e90fb0687c4dbf9f8e836365d5d | 91084cb42b959635c0071184222c4e0fdf077563 | refs/heads/master | 2020-08-21T13:45:47.323104 | 2019-10-22T10:03:35 | 2019-10-22T10:03:35 | 216,173,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | #-*- encoding:utf_8 -*-
# Minimal Flask app used to demo serving under gunicorn with threads.
from flask import Flask
app = Flask(__name__)
# Single GET endpoint at the site root.
@app.route('/', methods = ['GET'])
def demo():
    """Return a fixed plain-text body; acts as a liveness/demo page."""
    return "gunicorn and flask demo"
# Dev-server entry point (a production deployment runs under gunicorn
# instead); threaded=True lets the built-in server serve requests concurrently.
if __name__ == '__main__':
    app.run(debug=True,threaded=True)
| [
"1666013677@qq.com"
] | 1666013677@qq.com |
3ebf236d3782198f90f3912a4a7291f05d6af50a | 3cbf4a9d14cd487520f2bd05db10542705a37baf | /h2o-py/tests/testdir_algos/gbm/pyunit_mnist_manyCols_gbm_large.py | a1a2d84e7b47c80122b6ee0faa516ca2e442bdff | [
"Apache-2.0"
] | permissive | KendraFabric/h2o-3 | 733ff021553ff2c2d8f0c3336450d886d029cf15 | c75bc5d2dc644cc8c09df755185a4cc6e34e0d1a | refs/heads/master | 2023-03-15T12:32:02.852026 | 2016-08-26T14:01:07 | 2016-08-26T14:27:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def mnist_many_cols_gbm_large():
    """Smoke-test H2O GBM on the wide (many-column) MNIST training set."""
    # Pull the gzipped MNIST training CSV from the big-data fixture tree.
    train = h2o.import_file(path=pyunit_utils.locate("bigdata/laptop/mnist/train.csv.gz"))
    train.tail()
    from h2o.estimators.gbm import H2OGradientBoostingEstimator
    # Deliberately tiny model (a single stump): the point of the test is the
    # column count, not model quality.
    gbm_mnist = H2OGradientBoostingEstimator(ntrees=1,
                                             max_depth=1,
                                             min_rows=10,
                                             learn_rate=0.01)
    # Columns 0-783 are pixel features; column 784 is the response/label.
    gbm_mnist.train(x=range(784), y=784, training_frame=train)
    gbm_mnist.show()
# Standard H2O pyunit harness: run under the standalone test runner when
# executed directly, otherwise run immediately on import.
if __name__ == "__main__":
    pyunit_utils.standalone_test(mnist_many_cols_gbm_large)
else:
    mnist_many_cols_gbm_large()
| [
"spnrpa@gmail.com"
] | spnrpa@gmail.com |
bf8fe27306543b7b8e6f4c310bdaf5a2344ad562 | bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6 | /AtCoder/abc/066d_2.py | 7ae18e84240a2c3ac692397a688c5f7d69c412a1 | [] | no_license | y-oksaku/Competitive-Programming | 3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db | a3ff52f538329bed034d3008e051f30442aaadae | refs/heads/master | 2021-06-11T16:14:12.635947 | 2021-05-04T08:18:35 | 2021-05-04T08:18:35 | 188,639,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | from collections import Counter, defaultdict
class Combination:
    """Modular combinatorics helper.

    Precomputes factorials, modular inverses and inverse factorials up to
    ``size + 1`` modulo a prime, so each nCr/nPr query is O(1).
    """
    def __init__(self, size, mod=10**9 + 7):
        self.size = size + 2
        self.mod = mod
        # Tables indexed 0..size+1; entries 0 and 1 are seeded directly.
        self.fact = [1, 1] + [0] * size
        self.factInv = [1, 1] + [0] * size
        self.inv = [0, 1] + [0] * size
        for k in range(2, self.size):
            self.fact[k] = self.fact[k - 1] * k % mod
            # Standard modular-inverse recurrence for a prime modulus.
            self.inv[k] = -self.inv[mod % k] * (mod // k) % mod
            self.factInv[k] = self.factInv[k - 1] * self.inv[k] % mod
    def npr(self, n, r):
        """Permutations P(n, r) mod self.mod; 0 for invalid arguments."""
        if r < 0 or n < 0 or n < r:
            return 0
        return self.fact[n] * self.factInv[n - r] % self.mod
    def ncr(self, n, r):
        """Binomial coefficient C(n, r) mod self.mod; 0 for invalid arguments."""
        if r < 0 or n < 0 or n < r:
            return 0
        inv_part = self.factInv[r] * self.factInv[n - r] % self.mod
        return self.fact[n] * inv_part % self.mod
    def nhr(self, n, r):
        """Combinations with repetition: choose r items from n kinds."""
        return self.ncr(n + r - 1, n - 1)
    def factN(self, n):
        """n! mod self.mod; 0 for negative n."""
        return self.fact[n] if n >= 0 else 0
N = int(input())
A = list(map(int, input().split()))
MOD = 10**9 + 7
comb = Combination(N + 100)
cntA = Counter(A)
# A holds N+1 numbers in which exactly one value occurs twice; I is that
# duplicated value.
I = [a for a, c in cntA.items() if c == 2][0]
# L and R are the two (0-based) positions of the duplicate; S counts the
# elements strictly left of the first copy plus those strictly right of the
# second copy.
L, R = [i for i, a in enumerate(A) if a == I]
S = L + (N - R)
for i in range(1, N + 2):
    if i == 1:
        # Length-1 subsequences: the N distinct values.
        print(N)
        continue
    # Distinct subsequences of length i: all C(N+1, i) index choices minus
    # the C(S, i-1) selections counted twice (those that use exactly one
    # copy of the duplicate and i-1 items outside the span between copies).
    ans = comb.ncr(N + 1, i) - comb.ncr(S, i - 1)
    print(ans % MOD)
| [
"y.oksaku@stu.kanazawa-u.ac.jp"
] | y.oksaku@stu.kanazawa-u.ac.jp |
5f38af46ffc3dc1c89cce266942afc556be35569 | e4713c248c857b06a3cb0e9d0d15dd5513b1a8e9 | /phonenumbers/shortdata/region_GW.py | 9cb488fa9b2cafd1bbdb0857102120a418808bf6 | [
"Apache-2.0",
"MIT"
] | permissive | igushev/fase_lib | 8f081e0f6b956b186dc759906b21dc3fc449f045 | 182c626193193b196041b18b9974b5b2cbf15c67 | refs/heads/master | 2023-05-14T14:35:05.727202 | 2022-04-15T23:55:37 | 2022-04-15T23:55:37 | 107,228,694 | 10 | 0 | MIT | 2023-05-01T19:38:09 | 2017-10-17T06:47:07 | Python | UTF-8 | Python | false | false | 658 | py | """Auto-generated file, do not edit by hand. GW metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Guinea-Bissau (GW) short-number metadata.  The pattern '11[378]' matches
# the three-digit short/emergency codes 113, 117 and 118; '1\d{2}' covers
# the general three-digit 1xx space.
PHONE_METADATA_GW = PhoneMetadata(id='GW', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2}', possible_number_pattern='\\d{3}', possible_length=(3,)),
    emergency=PhoneNumberDesc(national_number_pattern='11[378]', possible_number_pattern='\\d{3}', example_number='113', possible_length=(3,)),
    short_code=PhoneNumberDesc(national_number_pattern='11[378]', possible_number_pattern='\\d{3}', example_number='113', possible_length=(3,)),
    short_data=True)
| [
"igushev@gmail.com"
] | igushev@gmail.com |
ea521aba29bdd5c7f046695777ae845f38e51f2d | d190750d6cb34e9d86ae96724cf4b56a2f57a74a | /tests/r/test_pntsprd.py | 7ae98fd1fc926c5de4f2ca0f970609c7bc661f1b | [
"Apache-2.0"
] | permissive | ROAD2018/observations | a119f61a48213d791de0620804adb8d21c2ad9fb | 2c8b1ac31025938cb17762e540f2f592e302d5de | refs/heads/master | 2021-09-24T04:28:02.725245 | 2018-09-16T23:06:30 | 2018-09-16T23:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.pntsprd import pntsprd
def test_pntsprd():
  """Test module pntsprd.py by downloading
  pntsprd.csv and testing shape of
  extracted data has 553 rows and 12 columns
  """
  test_path = tempfile.mkdtemp()
  x_train, metadata = pntsprd(test_path)
  try:
    assert x_train.shape == (553, 12)
  except AssertionError:
    # Clean up the temp download dir, then re-raise the original failure.
    # (The original used a bare ``except`` plus ``raise()``, which raises an
    # empty tuple — a TypeError — instead of propagating the AssertionError.)
    shutil.rmtree(test_path)
    raise
| [
"dustinviettran@gmail.com"
] | dustinviettran@gmail.com |
7da564ce249013583e2e383589c22437cbb733a9 | ffe4c155e228f1d3bcb3ff35265bb727c684ec1a | /UCL/Algorithms/Exercises/set_intersection.py | f9c84d7732de0af46c37f51969b8b12cbb821533 | [] | no_license | yuuee-www/Python-Learning | 848407aba39970e7e0058a4adb09dd35818c1d54 | 2964c9144844aed576ea527acedf1a465e9a8664 | refs/heads/master | 2023-03-12T00:55:06.034328 | 2021-02-28T13:43:14 | 2021-02-28T13:43:14 | 339,406,816 | 0 | 0 | null | 2021-02-28T11:27:40 | 2021-02-16T13:26:46 | Jupyter Notebook | UTF-8 | Python | false | false | 250 | py | def set_intersection(a, b):
intersection = [i for i in a if i in b] #O(nm)
intersection = list(set(a).intersection(set(b)))
intersection = list(set(a) & set(b))
intersection = list(filter(lambda x:x in a, b))
return intersection | [
"50982416+cyndereN@users.noreply.github.com"
] | 50982416+cyndereN@users.noreply.github.com |
80052d57664b2a589bf542dd229c9c48edc61736 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/fv/asiteconnp.py | bd7efa2b92e54a40a0cde06c8751451bea5afafe | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 4,379 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
# Abstract managed-object (MO) class for "Intersite Anycast Connectivity
# Information".  This file is generator-emitted ("do not modify!"); only
# review comments were added below — every executable statement is unchanged.
class ASiteConnP(Mo):
    # Class-level metadata registration, executed once at import time.
    meta = ClassMeta("cobra.model.fv.ASiteConnP")
    meta.isAbstract = True
    meta.moClassName = "fvASiteConnP"
    # NOTE(review): duplicate of the line above — a generator artifact.
    meta.moClassName = "fvASiteConnP"
    meta.rnFormat = ""
    meta.category = MoCategory.REGULAR
    meta.label = "Intersite Anycast Connectivity Information"
    meta.writeAccessMask = 0x1000001
    meta.readAccessMask = 0x1000001
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Comp")
    meta.concreteSubClasses.add("cobra.model.fv.SiteConnP")
    meta.concreteSubClasses.add("cobra.model.fv.SiteConnPDef")
    meta.rnPrefixes = [
        ]
    # Property metadata: bgpAsn — configurable remote-site ASN, range 1..2^32-1.
    prop = PropMeta("str", "bgpAsn", "bgpAsn", 33120, PropCategory.REGULAR)
    prop.label = "Remote site ASN"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(1, 4294967295)]
    meta.props.add("bgpAsn", prop)
    # Property metadata: childAction — implicit child-action flags.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    # Property metadata: descr — free-form description (constrained charset).
    prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)
    # Property metadata: dn — distinguished name (implicit, create-only).
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    # Property metadata: id — implicit site identifier.
    prop = PropMeta("str", "id", "id", 30384, PropCategory.REGULAR)
    prop.label = "Site ID"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("id", prop)
    # Property metadata: name — configurable object name.
    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)
    # Property metadata: nameAlias — configurable display alias.
    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)
    # Property metadata: rn — relative name (implicit, create-only).
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    # Property metadata: status — implicit MO lifecycle status flags.
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # No naming properties for this abstract class (rnFormat is empty).
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
6989372758828a2c53b0c7642fc6846fe7f79be8 | 1abc7d220beb38893dd8427428bc27d07e4b5ec9 | /lncrawl/core/downloader.py | a6f130b76f207df11074c94eedaba8a2b1451432 | [
"Apache-2.0"
] | permissive | huangmhao/lightnovel-crawler | 53448ec85faef4ec708af854e226f7f5f45e15d5 | d94a9a69c330999f03b5cd0da609f126a7136104 | refs/heads/master | 2020-05-23T11:24:59.513225 | 2019-05-14T16:52:25 | 2019-05-14T16:52:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,338 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
To download chapter bodies
"""
import json
import logging
import os
import traceback
from concurrent import futures
from urllib.parse import urlparse
from progress.bar import IncrementalBar
logger = logging.getLogger('DOWNLOADER')
def downlod_cover(app):
    """Fetch the novel's cover image into ``app.output_path`` and record the
    saved file path on ``app.book_cover`` (left ``None`` when unavailable).

    NOTE: the misspelt name is kept intentionally — callers in this module
    (and possibly elsewhere) use ``downlod_cover``.
    """
    app.book_cover = None
    if app.crawler.novel_cover:
        logger.info('Getting cover image...')
        try:
            # Derive the file extension from the URL path; default to png.
            ext = urlparse(app.crawler.novel_cover).path.split('.')[-1]
            filename = os.path.join(
                app.output_path, 'cover.%s' % (ext or 'png'))
            if not os.path.exists(filename):
                logger.info('Downloading cover image')
                response = app.crawler.get_response(app.crawler.novel_cover)
                with open(filename, 'wb') as f:
                    f.write(response.content)
                # end with
                logger.info('Saved cover: %s', filename)
            # end if
            app.book_cover = filename
        except Exception:
            # Best effort: a failed cover download must not abort the crawl.
            logger.debug(traceback.format_exc())
        # end try
    # end if
    if not app.book_cover:
        # logger.warn() is a deprecated alias; warning() is the supported API.
        logger.warning('No cover image')
    # end if
# end def
def download_chapter_body(app, chapter):
    """Ensure ``chapter['body']`` is populated, caching it as JSON on disk.

    Returns an error-message string when the body could not be fetched,
    otherwise ``None``.
    """
    result = None
    dir_name = os.path.join(app.output_path, 'json')
    if app.pack_by_volume:
        # One sub-folder per volume, e.g. "Volume 03".
        vol_name = 'Volume ' + str(chapter['volume']).rjust(2, '0')
        dir_name = os.path.join(dir_name, vol_name)
    # end if
    os.makedirs(dir_name, exist_ok=True)
    # Zero-padded chapter id keeps cache files lexicographically sorted.
    chapter_name = str(chapter['id']).rjust(5, '0')
    file_name = os.path.join(dir_name, chapter_name + '.json')
    chapter['body'] = ''
    if os.path.exists(file_name):
        # Cache hit: reuse the previously downloaded body.
        logger.info('Restoring from %s', file_name)
        with open(file_name, 'r') as file:
            old_chapter = json.load(file)
            chapter['body'] = old_chapter['body']
        # end with
    # end if
    if len(chapter['body']) == 0:
        body = ''
        try:
            logger.info('Downloading to %s', file_name)
            body = app.crawler.download_chapter_body(chapter)
        except Exception:
            logger.debug(traceback.format_exc())
        # end try
        if len(body) == 0:
            result = 'Body is empty: ' + chapter['url']
        else:
            # Prepend volume/chapter headings to the downloaded HTML body.
            chapter['body'] = '<h3>%s</h3><h1>%s</h1>\n%s' % (
                chapter['volume_title'], chapter['title'], body)
        # end if
        # The chapter (with whatever body we now have) is written back to
        # the cache file.
        with open(file_name, 'w') as file:
            file.write(json.dumps(chapter))
        # end with
    # end if
    return result
# end def
def download_chapters(app):
    """Download every chapter in ``app.chapters`` concurrently (via the
    crawler's executor) with a progress bar, after fetching the cover."""
    downlod_cover(app)
    bar = IncrementalBar('Downloading chapters', max=len(app.chapters))
    bar.start()
    if os.getenv('debug_mode') == 'yes':
        bar.next = lambda: None # Hide the progress bar in debug mode
    # end if
    # Map each submitted future to its chapter id (kept for reference).
    futures_to_check = {
        app.crawler.executor.submit(
            download_chapter_body,
            app,
            chapter,
        ): str(chapter['id'])
        for chapter in app.chapters
    }
    app.progress = 0
    for future in futures.as_completed(futures_to_check):
        # download_chapter_body returns an error string or None.
        result = future.result()
        if result:
            bar.clearln()
            logger.error(result)
        # end if
        app.progress += 1
        bar.next()
    # end for
    bar.finish()
# end def
| [
"dipu.sudipta@gmail.com"
] | dipu.sudipta@gmail.com |
3762dbc45fe80b9bda29d70e5924efe2586c0d09 | 2a0c2b3b682fdc7a49ff2ea107f53ac4b8fb5d20 | /pyLean/designMode/singletonPattern/SingleObject.py | 171a6a018d0ba94d002919f2289a12e5ce0a2e4a | [] | no_license | NoobsZero/DesignMode | 29f8327c09ecd8f26e9fc3c8618e5fba3de712b2 | 161997377020436491520a10fc3ac927469458f1 | refs/heads/master | 2023-07-08T04:24:59.776257 | 2021-08-17T05:55:46 | 2021-08-17T05:55:46 | 303,366,863 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,628 | py | # -*- encoding:utf-8 -*-
"""
@File :SingleObject.py
@Time :2020/10/12 15:29
@Author :Chen
@Software:PyCharm
单例模式(Singleton)---创建型
一、单例模式的使用场景
(1)、当创建一个对象所占用的资源很多,但同时又需要使用到该对象
(2)、当堆系统内的资源要求统一读写时,比如读写的配置信息,此时必须要求创建的实例信息相同
(3)、当有多个实例可能会引起程序错误时
总结:单例模式适用于只需要创建一个实例对象,程序全部使用同一个实例对象
二、实现方法
根据使用场景提炼出要点:
(1)、某个类只能有一个实例
(2)、必须要自行创建实例
(3)、必须向整个系统提供这个实例
实现方法:
(1)、只提供私有的构造方法
(2)、含有一个该类的静态私有对象
(3)、要提供一个静态的公用方法用于获取、创建私有对象
根据上面的描述,提供了俩种实现单例模式的方法分别为饿汉式和懒汉式
饿汉式:简单来说就是空间换时间,因为上来就实例化一个对象,占用了内存,(也不管你用还是不用)
懒汉式:简单的来说就是时间换空间,与饿汉式正好相反
"""
# Eager-style ("饿汉式") singleton
class EagerSingleton(object):
    """Eager-style singleton.

    Intent (translated from the original Chinese docstring): the shared
    object is created up front and every construction or lookup returns
    that same object — trading a little memory for fast access.
    """
    _instance = None
    def __new__(cls, *args, **kwargs):
        # Bug fix: the original tested ``hasattr(cls, '_instance')``, which
        # is always True because of the class attribute above, and stored
        # the new object in ``cls.instance`` (missing underscore) — so every
        # construction returned None.  Create the object exactly once and
        # cache it in the proper attribute.
        if cls._instance is None:
            cls._instance = super(EagerSingleton, cls).__new__(cls)
        return cls._instance
    @classmethod
    def get_instance(cls):
        """Return the cached shared instance (None until first construction)."""
        return cls._instance
# Lazy-style ("懒汉式") singleton
class LazySingleton(object):
    """
    Lazy singleton (translated): the shared instance is only created on the
    first get_instance() call, not when the class itself is defined.
    Pros:
        1. No resources are used until get_instance() is actually called.
    Cons:
        1. Not thread-safe: concurrent first calls may observe different
           instances.  A mutex would fix this at some cost in speed.
    """
    __instance = None
    def __init__(self):
        # Diagnostic prints only: report whether the cached instance already
        # existed when this constructor ran (messages intentionally left in
        # the original Chinese — they are runtime output, not comments).
        if not self.__instance:
            print('调用__init__,实例未创建')
        else:
            print('调用__init__,实例已经创建过了:', self.__instance)
    @classmethod
    def get_instance(cls):
        # The singleton is created here on first use and cached in the
        # name-mangled class attribute _LazySingleton__instance.
        if not cls.__instance:
            cls.__instance = LazySingleton()
        return cls.__instance
"""
单例模式
由于Python语言中并没有构造函数私有化的手段,所以要使用另外的策略。
Python语言在构造新对象时要先调用__new__方法取得内存空间,然后调用__init__方法初始化该空间。
因此,在Python语言中为保证只生成一个实例,实现单例的方式就是重写__new__方法,通过这种方式实现的单例类Singleton。
"""
# Python中 _ 和 __ 的含义
# 在python的类中,没有真正的私有化,不管是方法还是属性,
# 为了编程的需要,约定加了下划线 _ 的属性和方法不属于API,不应该在类的外面访问,也不会被from M import * 导入。
# _*:类的私有属性或方法:不建议在类的外面直接调用这个属性或方法,但是也可以调用
# __*:python中的__和一项称为name mangling的技术有关,name mangling (又叫name decoration命名修饰).
# 在很多现代编程语言中,这一技术用来解决需要唯一名称而引起的问题,比如命名冲突/重载等.
# 单例类Singleton通过重写静态__new__方法来实现实例生成过程的控制。用户无论构建多少次该类的对象都会返回同一个结果。
if __name__ == '__main__':
    # LazySingleton demo kept for reference (commented out by the author).
    # s3 = LazySingleton()
    # s1 = LazySingleton.get_instance()
    # s2 = LazySingleton.get_instance()
    # print(id(s1), id(s2))
    # Construct the eager singleton twice and print both ids; a correct
    # singleton implementation yields the same id for c1 and c2.
    c1 = EagerSingleton()
    c2 = EagerSingleton()
    print(id(c1), id(c2))
| [
"870628995@qq.com"
] | 870628995@qq.com |
472834b4d7bf4d9680788d1d846bf3e5140eae51 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/3999/codes/1671_1105.py | 63959a8ddb44b8df9cb9991080829476eb2151b3 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # Teste seu código aos poucos. Não teste tudo no final, pois fica mais difícil de identificar erros.
# Ao testar sua solução, não se limite ao caso de exemplo. Teste as diversas possibilidades de saída
# Read the crest name and echo it back.
X = input()
print("Entrada:", X)

# Crest -> house lookup table (replaces the original if/elif ladder).
_HOUSES = {
    "lobo": "Stark",
    "leao": "Lannister",
    "veado": "Baratheon",
    "dragao": "Targaryen",
    "rosa": "Tyrell",
    "sol": "Martell",
    "lula": "Greyjoy",
    "esfolado": "Bolton",
    "turta": "Tully",
}
Y = _HOUSES.get(X, "Brasao invalido")

# Known crest: print with the "Casa:" prefix; otherwise the error text alone.
if Y != "Brasao invalido":
    print("Casa:", Y)
else:
    print(Y)
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
8569eaedce6a31b10c11a8aefda9b425e5200758 | bfda3af75d94767a5cb265bd68c17cfbf94e3ee1 | /euler/problem067.py | 44e0d08738522bf8639fb9237bda997501d62b89 | [] | no_license | orenlivne/euler | d0e5b956a46eacfe423fbd6c52918beb91eea140 | 2afdd8bccdc5789c233e955b1ca626cea618eb9b | refs/heads/master | 2020-12-29T02:24:36.479708 | 2016-12-15T21:27:33 | 2016-12-15T21:27:33 | 20,263,482 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | '''
============================================================
http://projecteuler.net/problem=67
By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.
3
7 4
2 4 6
8 5 9 3
That is, 3 + 7 + 4 + 9 = 23.
Find the maximum total from top to bottom in triangle.txt (right click and 'Save Link/Target As...'), a 15K text file containing a triangle with one-hundred rows.
NOTE: This is a much more difficult version of Problem 18. It is not possible to try every route to solve this problem, as there are 2^99 altogether! If you could check one trillion (1012) routes every second it would take over twenty billion years to check them all. There is an efficient algorithm to solve it. ;o)
Created on Feb 21, 2013
@author: Oren Livne <livne@uchicago.edu>
============================================================
'''
import numpy as np, sys, urllib2
from numpy.ma.testutils import assert_equal
def read_triangle(f):
    '''Read triangle data from a line-oriented input stream.

    Returns a generator that yields one 1-D integer numpy array per line of
    space-separated integers.
    '''
    # A list comprehension (not a bare ``map`` object, which numpy cannot
    # consume on Python 3) and plain ``int`` instead of the ``np.int`` alias
    # (deprecated in NumPy 1.20, removed in 1.24).
    return (np.array([int(tok) for tok in line.split(' ')], dtype=int) for line in f)
def max_total(triangle):
    '''Max total of path in a triangle, top to bottom. In-place, saves one array.'''
    # sys.maxint existed only on Python 2; sys.maxsize works on both 2.6+ and 3.
    MIN_INT = -sys.maxsize - 1
    for a in triangle: # Loop over rows, top to bottom
        if len(a) > 1: # Every row after the single-element top row
            # Best total ending at each entry = entry + max(left parent,
            # right parent); MIN_INT pads the edges that have one parent.
            a = np.maximum(np.concatenate(([MIN_INT], prev)),
                           np.concatenate((prev, [MIN_INT]))) + a
        prev = a
    return np.max(prev) # Best path total over the last row
if __name__ == "__main__":
try:
# If Internet connection available
assert_equal(max_total(read_triangle(urllib2.urlopen('http://projecteuler.net/project/triangle.txt'))), 7273, 'Wrong sum')
except urllib2.URLError:
# If downloaded locally
assert_equal(max_total(read_triangle(urllib2.urlopen('problem067.dat', 'rb'))), 7273, 'Wrong sum')
| [
"oren.livne@gmail.com"
] | oren.livne@gmail.com |
fd7d6c7e681e3fd3e2c9f35e3d6d751638a4838b | d08802e22fd87494dce79f811439a7a19b6df1da | /src/sniffMyPackets/transforms/pcapture.py | e47d07f0b90c9652ff40aa71cd716a6470232a1b | [] | no_license | hcit/sniffMyPackets | e6c9597b9e237f0d45ef3242ee82e09ee5785f0e | 391c1f4707946d3a456a7d9b3e639bda6d88dff8 | refs/heads/master | 2021-01-19T05:24:44.476422 | 2013-04-18T12:53:03 | 2013-04-18T12:53:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | #!/usr/bin/env python
import logging, hashlib
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
from time import time
from common.entities import Interface, pcapFile
#from canari.maltego.utils import debug, progress
from canari.framework import configure #, superuser
__author__ = 'catalyst256'
__copyright__ = 'Copyright 2013, Sniffmypackets Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'catalyst256'
__email__ = 'catalyst256@gmail.com'
__status__ = 'Development'
__all__ = [
'dotransform'
]
#@superuser
@configure(
    label='Sniff Packets [pcap]',
    description='Sniffs packets on interface and saves to file',
    uuids=[ 'sniffMyPackets.v2.interface2pcap' ],
    inputs=[ ( 'sniffMyPackets', Interface ) ],
    debug=True
)
def dotransform(request, response):
    """Sniff packets on the requested interface, save them to a timestamped
    pcap in /tmp, and attach a pcapFile entity (with its SHA-1) to the
    Maltego response."""
    interface = request.value
    tstamp = int(time())
    fileName = '/tmp/' + str(tstamp) + '.pcap'
    # Optional packet count from the transform settings; default 300.
    if 'sniffMyPackets.count' in request.fields:
        pktcount = int(request.fields['sniffMyPackets.count'])
    else:
        pktcount = 300
    pkts = sniff(iface=interface, count=pktcount)
    wrpcap(fileName, pkts)
    # Hash the capture; ``with`` guarantees the handle is closed (the
    # original leaked the descriptor and had a dead ``sha1hash = ''``).
    with open(fileName, 'rb') as fh:
        sha1hash = hashlib.sha1(fh.read()).hexdigest()
    e = pcapFile(fileName)
    e.sha1hash = sha1hash
    response += e
    return response
| [
"catalyst256@gmail.com"
] | catalyst256@gmail.com |
9eb2dbbb86324a998b932f17626c77a7c4c9a350 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Clutter/Vertex.py | 5861047599940d3884e334b4a5c854d0dbff004d | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 6,063 | py | # encoding: utf-8
# module gi.repository.Clutter
# from /usr/lib64/girepository-1.0/Clutter-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Atk as __gi_repository_Atk
import gi.repository.GObject as __gi_repository_GObject
import gobject as __gobject
# Generated binary-introspection stub for Clutter.Vertex: method bodies are
# placeholders ("real signature unknown"), not real implementations.  Review
# comments only — every generated statement below is unchanged.
class Vertex(__gi.Boxed):
    """
    :Constructors:
    ::
        Vertex()
        alloc() -> Clutter.Vertex
        new(x:float, y:float, z:float) -> Clutter.Vertex
    """
    def alloc(self): # real signature unknown; restored from __doc__
        """ alloc() -> Clutter.Vertex """
        pass
    def copy(self): # real signature unknown; restored from __doc__
        """ copy(self) -> Clutter.Vertex """
        pass
    def equal(self, vertex_b): # real signature unknown; restored from __doc__
        """ equal(self, vertex_b:Clutter.Vertex) -> bool """
        return False
    def free(self): # real signature unknown; restored from __doc__
        """ free(self) """
        pass
    def init(self, x, y, z): # real signature unknown; restored from __doc__
        """ init(self, x:float, y:float, z:float) -> Clutter.Vertex """
        pass
    def new(self, x, y, z): # real signature unknown; restored from __doc__
        """ new(x:float, y:float, z:float) -> Clutter.Vertex """
        pass
    def _clear_boxed(self, *args, **kwargs): # real signature unknown
        pass
    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass
    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass
    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass
    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass
    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass
    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.
        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass
    def __init__(self): # real signature unknown; restored from __doc__
        pass
    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass
    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass
    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass
    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass
    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass
    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().
        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented. If it returns
        NotImplemented, the normal algorithm is used. Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass
    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass
    # Stub placeholders for the struct's x/y/z fields (the real accessors
    # live in the C library; these properties only exist for IDE completion).
    x = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    y = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    z = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    # Values captured by the generator at stub-creation time (the object
    # addresses and GType ids are from the generating machine, not runtime).
    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(Vertex), '__module__': 'gi.repository.Clutter', '__gtype__': <GType ClutterVertex (94911696720576)>, '__dict__': <attribute '__dict__' of 'Vertex' objects>, '__weakref__': <attribute '__weakref__' of 'Vertex' objects>, '__doc__': None, 'x': <property object at 0x7f54134ef180>, 'y': <property object at 0x7f54134ef270>, 'z': <property object at 0x7f54134ef360>, 'alloc': gi.FunctionInfo(alloc), 'new': gi.FunctionInfo(new), 'copy': gi.FunctionInfo(copy), 'equal': gi.FunctionInfo(equal), 'free': gi.FunctionInfo(free), 'init': gi.FunctionInfo(init)})"
    __gtype__ = None # (!) real value is '<GType ClutterVertex (94911696720576)>'
    __info__ = StructInfo(Vertex)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
7761132b27a0670dcab85da3b9ff48acc7fbbf81 | c06cab774533cbbc3fdcd27b0e7ce825aa4984cf | /scripts/motif_training/ex_fimo.py | b21ca2bcca7d85b455b31f2c81747e238601f714 | [] | no_license | cbragdon93/compbio_project2016 | 4dbaf16a9d11d1216f7063e247bbf74e1c9b1aca | cc72fb3252b940b9fbcd92bf6839c388cbe28106 | refs/heads/master | 2021-06-08T21:12:13.819895 | 2016-12-02T21:57:36 | 2016-12-02T21:57:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | """Script to run fimo on the entire genome/sequences"""
from subprocess import call
from itertools import chain
def run_fimo(meme_path, motif_file, sequence_file, op_folder, options):
    """Build and execute a fimo shell command.

    The command is assembled as: <meme_path>./fimo, then each option flag
    followed by its value, then --oc <op_folder>, then the motif and
    sequence files. The full command line is printed before execution.
    """
    parts = [meme_path + './fimo']
    # Interleave every option flag with its value, preserving dict order.
    for flag, value in options.items():
        parts.append(flag)
        parts.append(value)
    parts.append('--oc')
    parts.append(op_folder)
    parts.append(motif_file)
    parts.append(sequence_file)
    shell_command = ' '.join(parts)
    print(shell_command)
    call(shell_command, shell=True)
    return None
def ex_fimo(meme_path, motif_file, sequence_file, op_folder, motif_id, thresh=0.0001):
    """Run fimo for a single motif id, creating the output directory first.

    Args:
        meme_path: directory containing the fimo binary.
        motif_file: MEME motif file.
        sequence_file: FASTA sequence file to scan.
        op_folder: output folder passed to fimo's --oc option.
        motif_id: motif identifier passed to --motif.
        thresh: p-value threshold for reported matches.
    """
    import os  # local import: module header only pulls in subprocess/itertools

    options = {"--thresh": str(thresh), "--verbosity": str(1), "--motif": motif_id}
    fimo_dir = '/'.join(op_folder.split('/')[:-1])
    # Replaces the old shell `mkdir` wrapped in a bare `except: pass`, which
    # silently swallowed every error (including KeyboardInterrupt).
    if fimo_dir:
        os.makedirs(fimo_dir, exist_ok=True)
    run_fimo(meme_path, motif_file, sequence_file, op_folder, options)
| [
"k.dileep1994@gmail.com"
] | k.dileep1994@gmail.com |
e1416db5c7ebb0645e06c66131b32c7ad64a6c08 | ddb3cae0628dc37cac63ebc1b6ebedea58c7fc8f | /0x05-python-exceptions/1-safe_print_integer.py | f090b0a5fa4d22922d7f499da6905e8454639614 | [] | no_license | ikki2530/holbertonschool-higher_level_programming | 1df42d49d7b4cf985d53bd5d2a1e55a9b90a195c | f0649aae6f2d96ea7e7c9a6e2e05a96177abb40e | refs/heads/master | 2022-12-20T11:13:29.263591 | 2020-09-26T14:20:02 | 2020-09-26T14:20:02 | 259,413,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | #!/usr/bin/python3
def safe_print_integer(value):
    """Print ``value`` using the "{:d}" integer format and report success.

    Returns:
        True when the value was printed as an integer, False otherwise
        (nothing is printed in that case).
    """
    # EAFP: let format() decide whether the value is integer-formattable
    # instead of the old raise-ValueError-then-catch-it round trip.
    try:
        print("{:d}".format(value))
        return True
    except (ValueError, TypeError):
        return False
| [
"dagomez2530@gmail.com"
] | dagomez2530@gmail.com |
3b6e37d389894b053906c58a8a0628422c2ee636 | 994c1b533fe64265715231d6458a9f316ce82cbf | /posts/migrations/0004_auto_20200723_0024.py | 0fe68c0368a1f6983003954c66d29b11a52904fc | [] | no_license | zerobubus/hw05_final | 07320a8fcc1119f28cf3bd5e3285d381470f7f47 | ed89bdd5fa088c71bdb127a780fc2eac3bcda47c | refs/heads/master | 2022-11-24T23:09:29.335507 | 2020-07-29T16:49:10 | 2020-07-29T16:49:10 | 282,685,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,370 | py | # Generated by Django 2.2.9 on 2020-07-23 00:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: only AlterField operations adjusting
    verbose names and related names on Group and Post; no tables are
    created or dropped."""

    dependencies = [
        ('posts', '0003_auto_20200702_1838'),
    ]
    operations = [
        migrations.AlterField(
            model_name='group',
            name='description',
            field=models.TextField(max_length=100, verbose_name='описание'),
        ),
        migrations.AlterField(
            model_name='group',
            name='title',
            field=models.CharField(max_length=200, verbose_name='название'),
        ),
        migrations.AlterField(
            model_name='post',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='authors', to=settings.AUTH_USER_MODEL, verbose_name='автор'),
        ),
        migrations.AlterField(
            model_name='post',
            name='group',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='posts', to='posts.Group', verbose_name='группа'),
        ),
        migrations.AlterField(
            model_name='post',
            name='text',
            field=models.TextField(verbose_name='текст'),
        ),
    ]
| [
"spb.etr@yandex.ru"
] | spb.etr@yandex.ru |
fb12c9895eecf78e4d6b1944e5c87b2610b09ca2 | 1bb6c157928441a7103ee42836c44657774049fe | /app1/utils/image_url.py | 05a2cfc9be6ca055526a25b85a14eac8b366fe88 | [
"MIT"
] | permissive | xieyu-aa/news | 525158dfdbee5fb5fc00a5a8d085f3ba61d3ec5e | 3c74fe43690fbfd7271a3f58c706ac722ecad619 | refs/heads/main | 2023-02-03T10:15:16.226983 | 2020-12-28T08:47:06 | 2020-12-28T08:47:06 | 319,926,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | from qiniu import Auth, put_data
# Fill in your Qiniu Access Key and Secret Key below.
# NOTE(review): credentials are hard-coded and committed to source control --
# they should be moved to environment variables and the keys rotated.
access_key = 'QdYihoHdNftdfFtSdMr7ezeJb781HIh_FR-vxFdU'
secret_key = 'nLqJN9N3YR2NEx-Ngev3XMERt696ttpqA7SeM0lZ'
def image_url(image_data, bucket_name='new3333'):
    """Upload raw image bytes to Qiniu object storage.

    Args:
        image_data: raw bytes of the image to upload.
        bucket_name: target Qiniu bucket; defaults to the project bucket so
            existing callers are unaffected.

    Returns:
        The storage key assigned by Qiniu on success, otherwise None.
    """
    # Build the signed-upload credential from the module-level key pair.
    q = Auth(access_key, secret_key)
    # key=None lets Qiniu derive the object key from the uploaded content.
    key = None
    # Upload token is valid for one hour (3600 seconds).
    token = q.upload_token(bucket_name, key, 3600)
    ret, info = put_data(token, key, image_data)
    print(ret)
    print(info)
    if info.status_code == 200:
        return ret.get('key')
    else:
        return None
if __name__ == '__main__':
    # Manual smoke test: read a local sample image and upload it.
    with open('./滑稽.jpg', 'rb') as f:
        image_data = f.read()
    image_url(image_data)
| [
"you@example.com"
] | you@example.com |
8a3e125e2cdcc5e0e1da6a1bc5c0231997aa01c6 | de77d2ebb336a32149bd8a9a3d4d50018f264c3b | /melange/examples/doc_examples/tutorial/consume.py | 70e4190df7c578428a85bd102cf7bc3ba32d7cce | [
"MIT"
] | permissive | Rydra/melange | 34c87b3f9dc726a6463ffa9f886dd243193d9a9b | a313a956b3442d62887b8a5ec35fcc6cd5d1eca2 | refs/heads/main | 2023-02-04T20:40:54.551991 | 2023-01-29T18:39:09 | 2023-01-29T18:39:09 | 101,487,734 | 10 | 3 | MIT | 2022-05-21T20:53:46 | 2017-08-26T13:33:26 | Python | UTF-8 | Python | false | false | 1,017 | py | from simple_cqrs.domain_event import DomainEvent
from melange import SimpleMessageDispatcher, SingleDispatchConsumer, consumer
from melange.backends import LocalSQSBackend
from melange.examples.doc_examples.tutorial.publish import MyTestMessage
from melange.serializers import PickleSerializer, SerializerRegistry
class MyConsumer(SingleDispatchConsumer):
    """Consumer that prints the payload of every MyTestMessage it receives."""

    @consumer
    def on_my_test_message_received(self, event: MyTestMessage) -> None:
        # Registered via @consumer for MyTestMessage; dispatch is by the
        # event parameter's type annotation.
        print(event.message)
if __name__ == "__main__":
    # Serialize DomainEvent (and subclasses) with pickle.
    serializer_settings = {
        "serializers": {"pickle": PickleSerializer},
        "serializer_bindings": {DomainEvent: "pickle"},
    }
    serializer_registry = SerializerRegistry(serializer_settings)
    # Local SQS-compatible backend (e.g. elasticmq) on port 9324.
    backend = LocalSQSBackend(host="localhost", port=9324)
    consumer = MyConsumer()
    message_dispatcher = SimpleMessageDispatcher(
        consumer,
        serializer_registry,
        backend=backend,
    )
    print("Consuming...")
    # Blocks forever, polling the tutorial queue and dispatching messages.
    message_dispatcher.consume_loop("melangetutorial-queue")
| [
"davigetto@gmail.com"
] | davigetto@gmail.com |
e3508f2b0525429d3b32209655d8a032b09963e0 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2521/60765/284939.py | ac29a1ddd9459bb309375c78dd57491cf7eaf91b | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import math
import sys
import re
import collections
def solve():
    """Greedy rearrangement of the input multiset.

    Reads a list literal such as ``[1,1,2,3]`` from stdin and prints a
    reordering built by repeatedly emitting one element from each of the
    two most frequent remaining values (the usual "no two equal values
    adjacent" pattern -- presumably; confirm against the problem statement).
    """
    # =list(map(int,input().split()))
    # =int(input())
    # n =input()[2:-2].split('],[')
    # target=int(input())
    n=list(map(int,input()[1:-1].split(',')))
    n=collections.Counter(n)
    res=[]
    count=[]  # [value, remaining occurrences], kept sorted by count desc
    for i in n:
        count.append([i,n[i]])
    count.sort(reverse=True,key=lambda x:x[1])
    while len(count)>1:
        # Alternate the two most common values so equal values are split up.
        res.append(count[0][0])
        count[0][1]-=1
        res.append(count[1][0])
        count[1][1] -= 1
        if count[1][1] == 0:
            count.pop(1)
        if count[0][1] == 0:
            count.pop(0)
        count.sort(reverse=True, key=lambda x: x[1])
    if len(count)>0:
        # NOTE(review): only ONE element of the final value class is emitted
        # even if several occurrences remain -- verify this is intended.
        res.append(count[0][0])
    print(res)
solve() | [
"1069583789@qq.com"
] | 1069583789@qq.com |
f97ebb63ac535601bd997690464c36249a17b102 | c3649aec8b628cf39f30c9440423ecbb8a9bc3aa | /src/bentoml/_internal/configuration/helpers.py | 24a1c6343fc793edd731cdecc837867a2e4bee88 | [
"Apache-2.0"
] | permissive | parano/BentoML | 2488ad1baa3f948f925edbe6b0eb2ea458bdad17 | eaa6218eb805acd6016eb140a4e3a9d6818dd995 | refs/heads/main | 2023-07-07T06:34:41.571577 | 2023-03-14T08:07:32 | 2023-03-14T08:07:32 | 178,978,356 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,657 | py | from __future__ import annotations
import os
import socket
import typing as t
import logging
from typing import TYPE_CHECKING
from functools import singledispatch
import yaml
import schema as s
from ..utils import LazyLoader
from ...exceptions import BentoMLConfigException
if TYPE_CHECKING:
from types import ModuleType
logger = logging.getLogger(__name__)  # module-level logger for config warnings
# Tracing exporter types accepted by validate_tracing_type().
TRACING_TYPE = ["zipkin", "jaeger", "otlp", "in_memory"]
def import_configuration_spec(version: int) -> ModuleType:  # pragma: no cover
    """Lazily import the ``v<version>`` configuration spec module.

    LazyLoader defers the real import until first attribute access and
    raises with ``exc_msg`` when the versioned module does not exist.
    """
    return LazyLoader(
        f"v{version}",
        globals(),
        f"bentoml._internal.configuration.v{version}",
        exc_msg=f"Configuration version {version} does not exist.",
    )
@singledispatch
def depth(_: t.Any, _level: int = 0):  # pragma: no cover
    """Return the nesting depth of a value: 0 for scalars, 1 for a flat
    dict, 2 for a dict of dicts, and so on."""
    return _level


@depth.register(dict)
def _(d: dict[str, t.Any], level: int = 0, **kw: t.Any):
    # `default=` handles the empty-dict case: max() over an empty generator
    # used to raise ValueError; an empty dict simply has depth level + 1.
    return max(
        (depth(v, level + 1, **kw) for v in d.values()), default=level + 1
    )
def rename_fields(
    d: dict[str, t.Any],
    current: str,
    replace_with: str | None = None,
    *,
    remove_only: bool = False,
):
    """Rename (or drop) a deprecated key in an already-flattened dict.

    Args:
        d: flattened configuration mapping (depth must be exactly 1);
            use flatten_dict first.
        current: the deprecated key to rename or remove.
        replace_with: new key name; required unless ``remove_only`` is set.
        remove_only: when True, the key is deleted instead of renamed.

    Raises:
        ValueError: if ``d`` is not flattened.
    """
    if depth(d) != 1:
        raise ValueError(
            "Given dictionary is not flattened. Use flatten_dict first."
        ) from None
    if current in d:
        if remove_only:
            # Lazy %-style logger args: the message is only formatted when
            # the WARNING level is actually emitted.
            logger.warning("Field '%s' is deprecated and will be removed.", current)
            d.pop(current)
        else:
            assert replace_with, "'replace_with' must be provided."
            logger.warning(
                "Field '%s' is deprecated and has been renamed to '%s'",
                current,
                replace_with,
            )
            d[replace_with] = d.pop(current)
punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^`{|}~"""


def flatten_dict(
    d: t.MutableMapping[str, t.Any],
    parent: str = "",
    sep: str = ".",
) -> t.Generator[tuple[str, t.Any], None, None]:
    """Yield (dotted_key, value) pairs for a nested mapping, one level deep.

    Keys containing punctuation are wrapped in double quotes so the
    separator stays unambiguous in the flattened key.
    """
    for raw_key, value in d.items():
        if any(ch in punctuation for ch in raw_key):
            key = f'"{raw_key}"'
        else:
            key = raw_key
        flat_key = parent + sep + key if parent else key
        if isinstance(value, t.MutableMapping):
            nested = t.cast(t.MutableMapping[str, t.Any], value)
            yield from flatten_dict(nested, parent=flat_key, sep=sep)
        else:
            yield flat_key, value
def load_config_file(path: str) -> dict[str, t.Any]:
    """Read a YAML configuration file and return its parsed contents.

    Raises:
        BentoMLConfigException: when ``path`` does not exist.
    """
    if not os.path.exists(path):
        raise BentoMLConfigException(
            "Configuration file %s not found." % path
        ) from None
    with open(path, "rb") as f:
        return yaml.safe_load(f)
def get_default_config(version: int) -> dict[str, t.Any]:
    """Load and schema-validate the bundled default configuration for a
    given spec version.

    Raises:
        BentoMLConfigException: when the defaults do not conform to the
            version's schema.
    """
    spec_dir = os.path.dirname(__file__)
    config = load_config_file(
        os.path.join(spec_dir, f"v{version}", "default_configuration.yaml")
    )
    mod = import_configuration_spec(version)
    assert hasattr(mod, "SCHEMA"), (
        "version %d does not have a validation schema" % version
    )
    try:
        mod.SCHEMA.validate(config)
    except s.SchemaError as e:
        raise BentoMLConfigException(
            "Default configuration for version %d does not conform to given schema:\n%s"
            % (version, e)
        ) from None
    return config
def validate_tracing_type(tracing_type: str) -> bool:
    """Return True when ``tracing_type`` is one of the exporters listed in
    the module-level TRACING_TYPE."""
    return tracing_type in TRACING_TYPE
def validate_otlp_protocol(protocol: str) -> bool:
    """Return True when ``protocol`` is a supported OTLP transport."""
    return protocol in {"grpc", "http"}
def ensure_larger_than(target: int | float) -> t.Callable[[int | float], bool]:
    """Return a predicate that is True only for values strictly above
    ``target`` (i.e. the open-ended interval (target, inf))."""

    def is_above(value: int | float) -> bool:
        return value > target

    return is_above


# Convenience predicate: strictly positive numbers.
ensure_larger_than_zero = ensure_larger_than(0)
def ensure_range(
    lower: int | float, upper: int | float
) -> t.Callable[[int | float], bool]:
    """Return a predicate checking membership in the closed interval
    [lower, upper]."""

    def in_range(value: int | float) -> bool:
        return lower <= value and value <= upper

    return in_range
def ensure_iterable_type(typ_: type) -> t.Callable[[t.MutableSequence[t.Any]], bool]:
    """Return a predicate that is True when every element of a sequence is
    an instance of ``typ_`` (vacuously True for an empty sequence)."""

    def all_of_type(value: t.MutableSequence[t.Any]) -> bool:
        for item in value:
            if not isinstance(item, typ_):
                return False
        return True

    return all_of_type
def is_valid_ip_address(addr: str) -> bool:
    """Check whether ``addr`` parses as an IPv4 address (per
    ``socket.inet_aton``, which also accepts shorthand forms)."""
    try:
        socket.inet_aton(addr)
    except socket.error:
        return False
    return True
| [
"noreply@github.com"
] | parano.noreply@github.com |
c90cb2c8edee52cb8802a752aa9af21056bc7d27 | b7be7c13ccfee7bf0e06e391f1d71c75276810e0 | /posts/api_urls.py | 87bdf991d33ce3f5cfbfc5f8624b5519c4e0569c | [] | no_license | eshandas/django_project_template_mongo | 0d9ec53918abca30b7d08ec8897681c20f30dc13 | 6e5c1a255ccbf7498ddca9a10eca9cfe2c97c8a9 | refs/heads/master | 2020-05-21T19:05:46.837946 | 2017-04-05T10:50:52 | 2017-04-05T10:50:52 | 65,455,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | from django.conf.urls import url
from .api_views import (
PostAPI,
PostsAPI,
)
# Routes for the posts API: the collection at the app root, and a single
# post addressed by its alphanumeric id.
urlpatterns = [
    url(r'^$', PostsAPI.as_view(), name='posts'),
    url(r'^(?P<post_id>[a-zA-Z0-9]+)/$', PostAPI.as_view(), name='post'),
]
| [
"eshandasnit@gmail.com"
] | eshandasnit@gmail.com |
1ac8cfca27b83bf2beb4c6a26a8872a4b9b2a825 | 1858c78932d3e6cfeb46021176cff172457ee8bb | /polonator/fluidics/src/testing/excel_test.py | e4cd32bfd5269f7f4a6e11e16c562556157edd37 | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-secret-labs-2011"
] | permissive | grinner/Polonator | c5ddbc592afb022f9a2dfd4381560046fcac0371 | 512a30ca382d8cdcf8287488bdc70cb0f37a241c | refs/heads/master | 2021-05-15T01:47:00.506126 | 2019-04-19T01:41:59 | 2019-04-19T01:41:59 | 1,393,059 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,930 | py | """
---------------------------------------------------------------------------
Author: Mirko Palla.
Date: February 13, 2008.
For: G.007 polony sequencer design [fluidics software] at the Church Lab -
Genetics Department, Harvard Medical School.
Purpose: This program contains the complete code for module Biochem,
containing re-occurring biochecmistry subroutines in Python.
This software may be used, modified, and distributed freely, but this
header may not be modified and must appear at the top of this file.
------------------------------------------------------------------------------
"""
import xlrd
# NOTE(review): Python 2 source -- `print` statements and dict.has_key()
# will not run under Python 3.
book = xlrd.open_workbook("G007 Fluidic Volumes.xls") # read in Excel file into 'xlrd' object
sh = book.sheet_by_index(0) # create handle for first Excel sheet
#-----------------------------------------------------------------------------
# INTERNAL VOLUMES
#-----------------------------------------------------------------------------
i_volumes = {} # create dictionary for internal tubing volumes
# Result shape: i_volumes[to][from] = total internal path volume (length x area
# summed over the four segments of the route).
for row in range(2, 44):
    from_row = sh.cell_value(rowx=row, colx=0)
    to_row = sh.cell_value(rowx=row, colx=1)
    # From reagent block into manifold
    block_to_manifold = sh.cell_value(rowx=row, colx=2)
    tube_area_1 = sh.cell_value(rowx=row, colx=3)
    tube_volume_1 = block_to_manifold * tube_area_1 # tube volume 1
    # Manifold layer transition
    manifold_transition = sh.cell_value(rowx=row, colx=5)
    hole_area = sh.cell_value(rowx=row, colx=6)
    transition_volume = manifold_transition * hole_area # transition_volume
    # Manifold path length
    manifold_path_length = sh.cell_value(rowx=row, colx=8)
    path_area = sh.cell_value(rowx=row, colx=9)
    path_volume = manifold_path_length * path_area # path volume
    # From manifold to valve
    manifold_to_valve = sh.cell_value(rowx=row, colx=11)
    tube_area_2 = sh.cell_value(rowx=row, colx=12)
    tube_volume_2 = manifold_to_valve * tube_area_2 # tube volume 2
    #--------------------------- Volume dictionary creation -------------------
    total_volume = tube_volume_1 + transition_volume + path_volume + tube_volume_2 # total volume sum
    if not i_volumes.has_key(to_row):
        i_volumes[to_row] = {}
    """
    try:
        i_volumes[to_row]
    except KeyError:
        i_volumes[to_row] = {}
    """
    i_volumes[to_row][from_row] = total_volume
print "\n--> TESTING INTERNAL:", i_volumes
#-----------------------------------------------------------------------------
# EXTERNAL VOLUMES
#-----------------------------------------------------------------------------
e_volumes = {} # create dictionary for external tubing volumes
for row in range(47, 62):
    from_row = sh.cell_value(rowx=row, colx=0)
    to_row = sh.cell_value(rowx=row, colx=1)
    # Tubing run length
    tubing_run = sh.cell_value(rowx=row, colx=2)
    cross_sectional_area = sh.cell_value(rowx=row, colx=4)
    total_volume = tubing_run * cross_sectional_area # tubing path volume
    #--------------------------- Volume dictionary creation ------------------
    if not e_volumes.has_key(to_row):
        e_volumes[to_row] = {}
    e_volumes[to_row][from_row] = total_volume
print "\n\n--> TESTING EXTERNAL:", e_volumes
#-----------------------------------------------------------------------------
# MIXING CHAMBER VOLUMES
#-----------------------------------------------------------------------------
# Mixing-chamber volumes are read directly from column 5 (no length x area).
for row in range(63, 65):
    from_row = sh.cell_value(rowx=row, colx=0)
    to_row = sh.cell_value(rowx=row, colx=1)
    if not e_volumes.has_key(to_row):
        e_volumes[to_row] = {}
    e_volumes[to_row][from_row] = sh.cell_value(rowx=row, colx=5)
print "\n\n--> TESTING MIXER:", e_volumes
print "\n--> FINISHED VOLUME TESTING\n"
| [
"a.grinner@gmail.com"
] | a.grinner@gmail.com |
9ddad79b454f8bbbcde4f02b07870ad2085c6658 | 4c83b4d7aca6bbcd15b922ad7314440fea7c9a70 | /2020-08-14_p3_idade/script_matriz1b_idade_2020-03-14.py | 84f86d5b5f3afb767b7a76567d19f46eaf04c2dc | [] | no_license | poloplanejamento/odmatrix-joinville | 63b60a85055700698cdb590c181e7c8a4d5c7361 | be7ce0814fb9dad2d289cd836dde51baa9c0850d | refs/heads/main | 2023-01-23T11:43:45.451126 | 2020-12-10T23:17:58 | 2020-12-10T23:17:58 | 320,402,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,813 | py | #!/bin/env python3
from http.client import HTTPSConnection
from base64 import b64encode
import json
import csv
import pandas as pd
projectID = "42"
# Persistent HTTPS connection to the OD-matrix API.
c = HTTPSConnection("api.odmatrix.app")
# NOTE(review): API credential is hard-coded and committed to the repo --
# move it to an environment variable and rotate the key.
userAndPass = b64encode(b"fe6b53f0280443d5bd40d5d30694f356").decode("ascii")
headers = { 'Authorization' : 'Basic %s' % userAndPass }
finall_list = []
# One request per (age band, date, trip type); each JSON response is an
# origin/destination matrix with row/column labels plus a Data grid.
for fage in ["15_19","20_24","25_29","30_34","35_39","40_44","45_49","50_54","55_59","60_64","65_69","70_74","75_79","80_84","85_89","90_94","0_14"] :
    for date in ["2020-03-14"] :
        for ftriptype in ["microtrip","bus","private_transport"] :
            request = "/generatematrix?format=json&project={}&date={}&ftriptype={}&fage={}".format(projectID, date, ftriptype, fage)
            c.request('GET', request, headers=headers)
            res = c.getresponse()
            data = res.read()
            matrix = json.loads(data)
            print(request)
            # Flatten the matrix into one record per non-empty OD cell.
            for i, column in enumerate(matrix['ColumnLabels']):
                for j, row in enumerate(matrix['RowLabels']):
                    value = matrix['Data'][j][i]
                    if value == 0:
                        continue
                    full_row = {}
                    full_row['ProjectID'] = projectID
                    full_row['Date'] = date
                    full_row['Origin'] = row
                    full_row['Idade'] = fage
                    full_row['Destination'] = column
                    full_row['Modo'] = ftriptype
                    full_row['Trips'] = value
                    finall_list.append(full_row)
#print(finall_list)
# Pivot so each age band ("Idade") becomes a column of trip counts.
data = pd.DataFrame(finall_list)
final_data = pd.pivot_table(data, index=['ProjectID', 'Date', 'Origin', 'Destination', 'Modo'], columns='Idade', values='Trips')
final_data.to_csv("matriz1b_classe_2020-03-14.csv")
| [
"caiocco@gmail.com"
] | caiocco@gmail.com |
bc2f34a32d87fbd2859ab96dae35ff0fbaec3316 | 7bfb0fff9d833e53573c90f6ec58c215b4982d14 | /1081_smallest_subsequence_of_distinct_chars.py | fe3f41a7c1e3916fdd167b07def09b2d06e37a68 | [
"MIT"
] | permissive | claytonjwong/leetcode-py | 6619aa969649597a240e84bdb548718e754daa42 | 16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7 | refs/heads/master | 2023-07-14T23:40:26.569825 | 2021-08-22T17:23:20 | 2021-08-22T17:23:20 | 279,882,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | #
# 1081. Smallest Subsequence of Distinct Characters
#
# Q: https://leetcode.com/problems/smallest-subsequence-of-distinct-characters/
# A: https://leetcode.com/problems/smallest-subsequence-of-distinct-characters/discuss/891644/Kt-Js-Py3-C%2B%2B-Monotonic-Queue-%2B-Detailed-Explanation
#
class Solution:
    def smallestSubsequence(self, s: str) -> str:
        """Return the lexicographically smallest subsequence of ``s`` that
        contains every distinct character exactly once.

        Monotonic-stack greedy: pop a larger stack top whenever the current
        character is smaller and the popped character still occurs later.
        """
        last_seen = {ch: i for i, ch in enumerate(s)}
        stack = []
        used = set()
        for i, ch in enumerate(s):
            if ch in used:
                # Already placed; a later duplicate can never improve things.
                continue
            while stack and ch < stack[-1] and i < last_seen[stack[-1]]:
                used.discard(stack.pop())
            stack.append(ch)
            used.add(ch)
        return ''.join(stack)
| [
"claytonjwong@gmail.com"
] | claytonjwong@gmail.com |
c6fcfdd18cc8d5c82a78017317b30abaa333af6d | ff3727418c813ec475fd6cc53787de80497eaf1e | /bobo/direction/carte/test/nouvel_pos.py | cdb231a34c34681ccfb71d3e3a3c5631295fb9e3 | [] | no_license | pastrouveedespeudo/Final-Project | 7141133ea432543a32914f3eb28d497521e338ba | a41d04817b22bfa04d22d9fc0fcfcd0c3e8c679a | refs/heads/master | 2022-12-19T13:32:01.831673 | 2019-08-08T20:43:40 | 2019-08-08T20:43:40 | 193,540,979 | 0 | 0 | null | 2022-11-22T03:54:51 | 2019-06-24T16:27:33 | Python | UTF-8 | Python | false | false | 2,946 | py | """Here we calculate the new
direction. We have calculate
a one degree = 111.11 km
so we *0.009 the number of
km for have degrees to km and add it to lat
if direction isn't sud, nord
east or west we * by degrees
of circle trigo"""
from math import cos
from math import radians
def long_lat_1(sens, lat, long, kilometre):
    """Helper for long_lat: apply an intercardinal heading.

    ``kilometre`` is the displacement already converted to degrees. The
    latitude moves by a full step (sign per hemisphere) and the longitude
    by the step scaled with the cosine of the heading's trig angle. An
    unknown ``sens`` leaves the coordinates untouched.
    """
    # heading -> (latitude sign, longitude sign, angle in degrees).
    # NOTE(review): 'sudsudouest' uses 67.7 where the 22.5-degree compass
    # step pattern suggests 67.5 -- kept as-is, confirm with the author.
    headings = {
        'sudsudouest': (1, 1, 67.7),
        'sudouest': (1, 1, 45),
        'ouestsudouest': (1, 1, 22.5),
        'ouestnordouest': (-1, -1, 157.5),
        'nordouest': (-1, -1, 135),
        'nordnordouest': (-1, -1, 112.5),
        'estsudest': (1, -1, 337),
        'sudest': (1, -1, 315),
        'sudsudest': (1, -1, 292.5),
    }
    if sens in headings:
        lat_sign, long_sign, angle = headings[sens]
        lat = lat + lat_sign * kilometre
        long = long + long_sign * (kilometre * cos(radians(angle)))
    return lat, long
def long_lat(lat, long, kiloms, sens):
    """Compute a new (lat, long) after moving ``kiloms`` km towards ``sens``.

    One degree of latitude is taken as ~111.11 km, so km are converted to
    degrees with the factor 0.009. Cardinal directions shift a single axis;
    the three north-east headings combine a latitude step with a
    cosine-scaled longitude step; every other heading is delegated to
    long_lat_1.

    Args:
        lat, long: current coordinates in degrees.
        kiloms: distance in km; invalid, missing, or > 500 falls back to 20.
        sens: heading name in French (e.g. 'sud', 'nordest', 'sudouest').

    Returns:
        Tuple (lat, long) of the new position.
    """
    # Coerce the distance to float; the two except branches were duplicates
    # (and redundantly re-floated the 20.0 literal).
    try:
        kiloms = float(kiloms)
    except (TypeError, ValueError):
        kiloms = 20.0
    if kiloms > 500:
        kiloms = 20.0
    kilometre = kiloms * 0.009  # km -> degrees
    if sens == 'sud':
        lat = lat + kilometre
    elif sens == 'nord':
        lat = lat - kilometre
    elif sens == 'ouest':
        long = long + kilometre
    elif sens == 'est':
        long = long - kilometre
    elif sens == 'nordnordest':
        lat = lat - kilometre
        long = long - kilometre * cos(radians(337))
    elif sens == 'nordest':
        lat = lat - kilometre
        long = long - kilometre * cos(radians(315))
    elif sens == 'estnordest':
        lat = lat - kilometre
        long = long - kilometre * cos(radians(292.5))
    else:
        lat, long = long_lat_1(sens, lat, long, kilometre)
    return lat, long
| [
"noreply@github.com"
] | pastrouveedespeudo.noreply@github.com |
1f49ef3154e7a7d1b7fbe152bd62734bf5a80eb1 | ea42986fbffb82b57df399fb90f3b962a63fef45 | /quiz/migrations/0001_initial.py | 3fa36a39cdc22bc3f128b566833b4548b476d700 | [] | no_license | akhad97/Quiz_API | 2d0b28ed3e7c6a7fc1cabc5b91d5b73d7173e10c | f45a99faf0f0109799c5506066e19d22dc4c201a | refs/heads/master | 2023-03-27T10:09:49.741610 | 2021-03-29T09:26:36 | 2021-03-29T09:26:36 | 352,585,216 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,729 | py | # Generated by Django 3.1.7 on 2021-03-10 11:26
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial auto-generated schema for the quiz app: a custom user model
    plus Answer, EmailVerification, Question, Quiz, QuizTaker and
    UsersAnswer tables, with the cross-model foreign keys added last."""

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('username', models.CharField(max_length=50, unique=True)),
                ('score', models.IntegerField(default=0)),
                ('email', models.EmailField(max_length=254, verbose_name='email address')),
                ('first_name', models.CharField(blank=True, max_length=255, verbose_name='First Name')),
                ('last_name', models.CharField(blank=True, max_length=255, verbose_name='Last Name')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('label', models.CharField(max_length=250)),
                ('is_correct', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='EmailVerification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254)),
                ('code', models.CharField(max_length=30)),
                ('expire_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question', models.CharField(max_length=100)),
                ('order', models.IntegerField(default=0)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Quiz',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('code', models.CharField(max_length=30)),
                ('slug', models.SlugField(blank=True)),
                ('roll_out', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'Quizzes',
                'ordering': ['created_at'],
            },
        ),
        migrations.CreateModel(
            name='QuizTaker',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('score', models.IntegerField(default=0)),
                ('completed', models.BooleanField(default=False)),
                ('date_finished', models.DateTimeField(null=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('quiz', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quiz.quiz')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='UsersAnswer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('answer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='quiz.answer')),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quiz.question')),
                ('quiz_taker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quiz.quiztaker')),
            ],
        ),
        # Foreign keys added after model creation to resolve ordering.
        migrations.AddField(
            model_name='question',
            name='quiz',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='quizzes', to='quiz.quiz'),
        ),
        migrations.AddField(
            model_name='answer',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='quiz.question'),
        ),
        migrations.AddField(
            model_name='customuser',
            name='quiz',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='quiz.quiz'),
        ),
        migrations.AddField(
            model_name='customuser',
            name='user_permissions',
            field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
        ),
    ]
| [
"ahadjon.abdullaev1997@gmail.com"
] | ahadjon.abdullaev1997@gmail.com |
9c8c7eff1b9ec6d1190a1964535c46aa88f98cfa | 93549eba4b184d79af808bd1e8846995553a1f92 | /dataMiningFrame_GammaLab/src/dependence.py | 30194a19db4b38ab2fc3fff49832a0e755507cd5 | [] | no_license | morindaz/audioEmotion_relative | 4d91e36f8716d5ba77eb7f170e5eac6b5a325a75 | 9ab298a37504dc6fb48e2ae9e34156ce8f78b8cf | refs/heads/master | 2021-04-27T00:08:58.069396 | 2018-03-04T05:11:22 | 2018-03-04T05:11:22 | 123,757,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | # coding=utf-8
'''
此文件中记录一些常用的配置文件,包含:
读入数据的位置csvPath
特征存放位置featurePath
模型位置modelPath+estimatorName
模型参数位置modelPath+paraName
'''
csvPath ='../data/selectedCombinedAllwithLabel.csv' #读入数据的位置
testPath = '../data/test.csv'
trainPath = '../data/train.csv'
featureBasic = '..//feature//'
featureName = 'cv0Clear30.csv' #特征的具体csv名字
featurePath = featureBasic+featureName
modelPath = '..//models//'
estimatorName = "estimator.model"
paramName = "param.pkl"
| [
"morindaz.mao@ucloud.cn"
] | morindaz.mao@ucloud.cn |
29995561366f597a2dfee137c6d6b0c14f411424 | d820c9b02da27d54520ae7a0557a939ed4c1d04a | /Level_1/직사각형별찍기.py | 348bb6c28e552584598e193f9722f12d9397cf0c | [] | no_license | Parkyunhwan/Programmers | fc42f83a92cdc2800c5ef0d2d925ebe099caa427 | 39ea61674a5e5bb569e8831bfbf4ba64597088f8 | refs/heads/master | 2020-12-02T09:08:29.530002 | 2020-10-23T13:06:21 | 2020-10-23T13:06:21 | 230,956,911 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # 정석풀이
## print("", end='') end기능을 통해 뒤에 어떤 문자가 올지 지정해줄 수 있다.
a, b = map(int, input().strip().split(' '))
answer=""
ina = []
for i in range(b):
for j in range(a):
print("*", end='')
print()
# 연산자 풀이
a, b = map(int, input().strip().split(' '))
answer=("*"*a + "\n")*b
print(answer) | [
"pyh8618@gmail.com"
] | pyh8618@gmail.com |
05ee715c3472b1cb03f10effc47b4b0c9e0bb6b4 | df694423318926cf0cac2d8a293a232713d5fc69 | /ads/templatetags/contacted.py | 1a97fe0e386a3b9a314f068bf113505393ef1885 | [] | no_license | ouhouhsami/sanscom | 3e29da2dbbd3f3e9e094ed4546e879f9bf985404 | 23fe68be45649173841832361208ebdb65ffa599 | refs/heads/master | 2020-12-24T14:44:50.521981 | 2015-04-24T12:23:49 | 2015-04-24T12:23:49 | 32,098,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | from ads.models import Ad, Search, AdSearchRelation
from django import template
register = template.Library()
@register.filter(name='contacted')
def contacted(user, obj):
    """Template filter: has ``user`` already been contacted about ``obj``?

    For an Ad, True when any AdSearchRelation linking the ad to one of the
    user's searches has ``ad_contacted`` set; for a Search, the symmetric
    check on ``search_contacted``. Implicitly returns None for any other
    object type (preserved from the original behaviour).
    """
    # `return any(flags)` replaces the redundant `if any(...): return True /
    # return False` pattern -- any() already yields a bool.
    if obj.__class__ == Ad:
        flags = AdSearchRelation.objects.filter(
            ad=obj, search__user=user
        ).values_list('ad_contacted', flat=True)
        return any(flags)
    if obj.__class__ == Search:
        flags = AdSearchRelation.objects.filter(
            search=obj, ad__user=user
        ).values_list('search_contacted', flat=True)
        return any(flags)
| [
"samuel.goldszmidt@gmail.com"
] | samuel.goldszmidt@gmail.com |
526ade87755fc44b5fb54f1538fc465866bf5842 | 65f7e25aeff0c400e9d82a6b70d746f02ff58c61 | /openstackclient/tests/identity/v2_0/test_identity.py | 8a50a48a063ddd1b90ae171f87a4b2d611e7dada | [
"Apache-2.0"
] | permissive | pombredanne/python-openstackclient | 877255c5d0962a5a202133675ca3199c4d36ec62 | 6fe687fdf662a7495b20a1d94f27bf557525af58 | refs/heads/master | 2020-12-29T02:47:08.116982 | 2013-09-12T19:49:41 | 2013-09-12T21:23:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from openstackclient.tests.identity.v2_0 import fakes
from openstackclient.tests import utils
AUTH_TOKEN = "foobar"
AUTH_URL = "http://0.0.0.0"
class TestIdentityv2(utils.TestCommand):
    """Base test case for Identity v2 commands.

    Installs a fake identity client so tests never contact a real
    Keystone endpoint.
    """

    def setUp(self):
        super(TestIdentityv2, self).setUp()
        fake_identity = fakes.FakeIdentityv2Client(
            endpoint=AUTH_URL,
            token=AUTH_TOKEN,
        )
        self.app.client_manager.identity = fake_identity
| [
"dtroyer@gmail.com"
] | dtroyer@gmail.com |
db48951ba1c8b769d951e5206cf549f040cfc177 | 9f89fa328fa17eb86c19c7c44f2ec36a71e85a85 | /tests/test_video_upload.py | 007e69cef5c47b6befe3064f8731eb00167f52ce | [
"BSD-3-Clause"
] | permissive | bjoernh/django-cast | 5d7ca6e1a8ae0a364155716232fea663be44b977 | 89527f9c38ff04745c93f395c8111883210f0cf3 | refs/heads/master | 2020-05-01T06:05:52.679191 | 2019-03-23T08:15:19 | 2019-03-23T08:15:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | import pytest
from django.urls import reverse
from cast.models import Video
class TestVideoUpload:
    """Exercise the cast video-upload API endpoint."""

    @pytest.mark.django_db
    def test_upload_video_not_authenticated(self, client, image_1px_io):
        """Anonymous uploads must be redirected to the login page."""
        upload_url = reverse("cast:api:upload_video")
        image_1px_io.seek(0)
        r = client.post(upload_url, {"original": image_1px_io})
        # redirect to login
        assert r.status_code == 302

    @pytest.mark.django_db
    def test_upload_video_authenticated(self, client, user, image_1px_io):
        """A logged-in user can upload; poster creation must be triggered."""
        # login
        r = client.login(username=user.username, password=user._password)

        self.called_create_poster = False

        def set_called_create_poster():
            self.called_create_poster = True

        # Stub out poster generation and restore it in a finally block so a
        # failing request/assertion cannot leak the patch into other tests
        # (the original restored it inline, which leaked on any exception).
        saved_create_poster = Video._create_poster
        Video._create_poster = lambda x: set_called_create_poster()
        try:
            # upload
            upload_url = reverse("cast:api:upload_video")
            image_1px_io.seek(0)
            r = client.post(upload_url, {"original": image_1px_io})
        finally:
            Video._create_poster = saved_create_poster
        assert r.status_code == 201
        assert self.called_create_poster
        assert int(r.content.decode("utf-8")) > 0
| [
"jochen@wersdoerfer.de"
] | jochen@wersdoerfer.de |
228c06865e9face05da10ba72bf837de0c38b96b | cd1132c39b02e8997a4da832f9c2b760caba1801 | /napari/layers/image/experimental/octree_level.py | fa1f9a433482d77c522caf33c00ce62a90338d12 | [
"BSD-3-Clause"
] | permissive | HarshCasper/napari | 8c9f7051afc36d492f9e30760fe07758bb91e338 | 3ed7d2db678f4012753f53b2d40cff9d34a8011f | refs/heads/master | 2023-03-19T01:27:58.473927 | 2021-03-15T05:41:29 | 2021-03-15T05:41:29 | 347,844,575 | 0 | 0 | BSD-3-Clause | 2021-03-15T05:39:00 | 2021-03-15T05:11:44 | Python | UTF-8 | Python | false | false | 6,852 | py | """OctreeLevelInfo and OctreeLevel classes.
"""
import logging
import math
from typing import Dict, List, Optional
import numpy as np
from ....types import ArrayLike
from .octree_chunk import OctreeChunk, OctreeChunkGeom, OctreeLocation
from .octree_util import OctreeMetadata
LOGGER = logging.getLogger("napari.octree")
class OctreeLevelInfo:
    """Information about one level of the octree.

    This should be a NamedTuple.

    Parameters
    ----------
    meta : OctreeMetadata
        Information about the entire octree.
    level_index : int
        The index of this level within the whole tree.
    """

    def __init__(self, meta: OctreeMetadata, level_index: int):
        self.meta = meta
        self.level_index = level_index

        # Each level halves the resolution of the one below it.
        self.scale = 2 ** level_index

        base_height, base_width = meta.base_shape[0], meta.base_shape[1]
        self.image_shape = (
            int(base_height / self.scale),
            int(base_width / self.scale),
        )

        # One tile at this level covers tile_size * scale base-image pixels.
        span = meta.tile_size * self.scale
        self.rows = math.ceil(base_height / span)
        self.cols = math.ceil(base_width / span)

        self.shape_in_tiles = [self.rows, self.cols]
        self.num_tiles = self.rows * self.cols
class OctreeLevel:
    """One level of the octree.

    An OctreeLevel is "sparse" in that it only contains a dict of
    OctreeChunks for the portion of the octree that is currently being
    rendered. So even if the full level contains hundreds of millions of
    chunks, this class only contains a few dozens OctreeChunks.

    This was necessary because even having a null reference for every
    OctreeChunk in a level would use too much space and be too slow to
    construct.

    Parameters
    ----------
    slice_id : int
        The id of the OctreeSlice we are in.
    data : ArrayLike
        The data for this level.
    meta : OctreeMetadata
        The base image shape and other details.
    level_index : int
        Index of this specific level (0 is full resolution).

    Attributes
    ----------
    info : OctreeLevelInfo
        Metadata about this level.
    _tiles : Dict[tuple, OctreeChunk]
        Maps (row, col) tuple to the OctreeChunk at that location.
    """

    def __init__(
        self,
        slice_id: int,
        data: ArrayLike,
        meta: OctreeMetadata,
        level_index: int,
    ):
        self.slice_id = slice_id
        self.data = data

        self.info = OctreeLevelInfo(meta, level_index)
        self._tiles: Dict[tuple, OctreeChunk] = {}

    def get_chunk(
        self, row: int, col: int, create=False
    ) -> Optional[OctreeChunk]:
        """Return the OctreeChunk at this location if it exists.

        If create is True, an OctreeChunk will be created if one
        does not exist at this location.

        Parameters
        ----------
        row : int
            The row in the level.
        col : int
            The column in the level.
        create : bool
            If True, create the OctreeChunk if it does not exist.

        Returns
        -------
        Optional[OctreeChunk]
            The OctreeChunk if one existed or we just created it.
        """
        try:
            return self._tiles[(row, col)]
        except KeyError:
            if not create:
                return None  # It didn't exist so we're done.

        rows, cols = self.info.shape_in_tiles
        if row < 0 or row >= rows or col < 0 or col >= cols:
            # The coordinates are not in the level. Not an exception because
            # callers might be trying to get children just over the edge
            # for non-power-of-two base images.
            return None

        # Create a chunk at this location and return it.
        octree_chunk = self._create_chunk(row, col)
        self._tiles[(row, col)] = octree_chunk
        return octree_chunk

    def _create_chunk(self, row: int, col: int) -> OctreeChunk:
        """Create a new OctreeChunk for this location in the level.

        Parameters
        ----------
        row : int
            The row in the level.
        col : int
            The column in the level.

        Returns
        -------
        OctreeChunk
            The newly created chunk.
        """
        level_index = self.info.level_index

        meta = self.info.meta
        layer_ref = meta.layer_ref

        location = OctreeLocation(
            layer_ref, self.slice_id, level_index, row, col
        )

        scale = self.info.scale

        tile_size = self.info.meta.tile_size
        scaled_size = tile_size * scale

        pos = np.array(
            [col * scaled_size, row * scaled_size], dtype=np.float32
        )

        data = self._get_data(row, col)

        # Create OctreeChunkGeom used by the visual for rendering this
        # chunk. Size it based on the base image pixels, not based on the
        # data in this level, so it's exact.
        #
        # Use the builtin ``float`` dtype here: ``np.float`` was merely an
        # alias for it, deprecated in NumPy 1.20 and removed in NumPy 1.24.
        base = np.array(meta.base_shape[::-1], dtype=float)
        remain = base - pos
        size = np.minimum(remain, [scaled_size, scaled_size])
        geom = OctreeChunkGeom(pos, size)

        # Return the newly created chunk.
        return OctreeChunk(data, location, geom)

    def _get_data(self, row: int, col: int) -> ArrayLike:
        """Get the chunk's data at this location.

        Parameters
        ----------
        row : int
            The row coordinate.
        col : int
            The column coordinate.

        Returns
        -------
        ArrayLike
            The data at this location.
        """
        tile_size = self.info.meta.tile_size

        array_slice = (
            slice(row * tile_size, (row + 1) * tile_size),
            slice(col * tile_size, (col + 1) * tile_size),
        )

        if self.data.ndim == 3:
            array_slice += (slice(None),)  # Add the colors.

        return self.data[array_slice]
def log_levels(levels: List[OctreeLevel], start_level: int = 0) -> None:
    """Log the dimensions of each level nicely.

    We take start_level so we can log the "extra" levels we created but
    with their correct level numbers.

    Parameters
    ----------
    levels : List[OctreeLevel]
        Print information about these levels.
    start_level : int
        Start the indexing at this number, shift the indexes up.
    """
    from ...._vendor.experimental.humanize.src.humanize import intword

    def _format_dim(dim: tuple) -> None:
        """Format (rows, cols) together with a human-readable total."""
        return f"({dim[0]}, {dim[1]}) = {intword(dim[0] * dim[1])}"

    for level_number, level in enumerate(levels, start=start_level):
        LOGGER.info(
            "Level %d: %s pixels -> %s tiles",
            level_number,
            _format_dim(level.info.image_shape),
            _format_dim(level.info.shape_in_tiles),
        )
| [
"noreply@github.com"
] | HarshCasper.noreply@github.com |
f553473cad4b347a8d994f981d9e3b036e3d894d | a9f4434d3b410886ffc10aa5aede3634692152b6 | /0219/ex9.py | 7338c30194e1471d7f625e24a070bbb39ba10eb9 | [] | no_license | parka01/python_ex | d3690dcd8753864c335bf7782553719a072bd01d | a5811487516eb9ef86d5ae93e9060cac267b87ce | refs/heads/main | 2023-03-13T08:35:03.837790 | 2021-02-26T03:40:41 | 2021-02-26T03:40:41 | 339,892,972 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | num1=input('1 번째 성적 입력: ')
# Read five scores (0-100 each), re-prompting until a valid score is
# entered, then print the total and the average.
#
# The original first half of this exercise re-implemented the flow with
# unquoted prompt strings (syntax errors) and inconsistent if/else
# nesting; it is replaced by the working validated loop below (the
# "model answer" the file already contained).
sub = 1
total = 0
while sub <= 5:
    score = int(input(str(sub) + '번째 점수 입력: '))
    if score >= 0 and score <= 100:
        total = total + score
        print(sub, "번째 성적:", score)
        sub = sub + 1  # only advance on a valid score
    else:
        print('유효한 성적이 아닙니다.')
print('총점: ', total)
print('평균: ', total / 5)
"68191916+parka01@users.noreply.github.com"
] | 68191916+parka01@users.noreply.github.com |
0a7355f1e458c39857cd28e18a0a5d8508a9bf0c | 3ff1c245d945acf82e48f388d2457204e202275f | /desafio/migrations/0010_auto_20201020_0921.py | 5d4b58297cbbe8d0e9195a30c4618331810f70fb | [] | no_license | rauldosS/desafio_compiladores | 075e7dcb3a167d20d71928727db6c1cb500e23af | da01adf41c47dafd50b1487bb4ad8d27c4f2d199 | refs/heads/main | 2023-01-03T09:13:18.990618 | 2020-10-29T01:25:59 | 2020-10-29T01:25:59 | 305,174,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | # Generated by Django 3.1.2 on 2020-10-20 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django ``makemigrations`` (Django 3.1.2).

    Updates admin ordering/labels for the Caracteres and Linhas models
    and adds the required ``versao`` integer column to
    ``modelformarquivo``.
    """

    dependencies = [
        ('desafio', '0009_auto_20201020_0858'),
    ]

    operations = [
        # Meta-option tweaks only -- no schema change for these two models.
        migrations.AlterModelOptions(
            name='caracteres',
            options={'ordering': ('id', 'caractere'), 'verbose_name': 'Caractere', 'verbose_name_plural': 'Caracteres'},
        ),
        migrations.AlterModelOptions(
            name='linhas',
            options={'ordering': ('id', 'linha'), 'verbose_name': 'Linha', 'verbose_name_plural': 'Linhas'},
        ),
        # New non-null column; default=1 backfills existing rows and is then
        # dropped from the model (preserve_default=False).
        migrations.AddField(
            model_name='modelformarquivo',
            name='versao',
            field=models.IntegerField(default=1, verbose_name='Versao'),
            preserve_default=False,
        ),
    ]
| [
"48498755+rauldosS@users.noreply.github.com"
] | 48498755+rauldosS@users.noreply.github.com |
40167e3cf9afb33c2cca023593a06ae0fd03b096 | 6809b17768d492866abf102318a5dcd2a03e8239 | /learnpythonthehardway/Binary-Search-Tree-to-Greater-Sum-Tree-5050.py | aa726b1e2475bad48db5c7abcd603bb4bcc3617d | [] | no_license | dgpllc/leetcode-python | 28428430a65d26d22f33d8c7e41089f6a783743a | 340ae58fb65b97aa6c6ab2daa8cbd82d1093deae | refs/heads/master | 2020-06-01T14:42:17.162579 | 2019-06-03T14:42:43 | 2019-06-03T14:42:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,450 | py | # Given the root of a binary search tree with distinct values, modify it so that every node has a new value equal to
# the sum of the values of the original tree that are greater than or equal to node.val.
#
# As a reminder, a binary search tree is a tree that satisfies these constraints:
#
# The left subtree of a node contains only nodes with keys less than the node's key.
# The right subtree of a node contains only nodes with keys greater than the node's key.
# Both the left and right subtrees must also be binary search trees.
#
#
# Example 1:
#
#
#
# Input: [4,1,6,0,2,5,7,null,null,null,3,null,null,null,8]
# Output: [30,36,21,36,35,26,15,null,null,null,33,null,null,null,8]
#
#
# Note:
#
# The number of nodes in the tree is between 1 and 100.
# Each node will have value between 0 and 100.
# The given tree is a binary search tree.
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def bstToGst(self, root):
        """Rewrite each node's value to the sum of all BST values >= it.

        Walks the tree in reverse in-order (right, node, left), which
        visits values from largest to smallest, carrying a running total.
        This replaces the original two-pass version, which collected every
        node into a list (O(n) extra space) and used the Python-2-only
        ``xrange`` builtin; this version needs only O(h) stack space and
        runs on Python 3.

        :type root: TreeNode
        :rtype: TreeNode
        """
        self._running_total = 0

        def visit(node):
            if not node:
                return
            visit(node.right)
            self._running_total += node.val
            node.val = self._running_total
            visit(node.left)

        visit(root)
        return root
| [
"xin_wei@intuit.com"
] | xin_wei@intuit.com |
522f0be76de3348a09f76d4c24dce5e68bc11941 | 3ccd609f68016aad24829b8dd3cdbb535fb0ff6d | /python/bpy/types/CyclesLightSettings.py | b90e2afb63612b6f9fcee2bf33dd98e67c9fb350 | [] | no_license | katharostech/blender_externs | 79b2eed064fd927e3555aced3e2eb8a45840508e | fdf7f019a460de0fe7e62375c1c94f7ab0e9f68d | refs/heads/master | 2020-04-11T14:00:29.393478 | 2018-10-01T00:40:51 | 2018-10-01T00:40:51 | 161,838,212 | 1 | 1 | null | 2018-12-14T20:41:32 | 2018-12-14T20:41:32 | null | UTF-8 | Python | false | false | 160 | py | class CyclesLightSettings:
cast_shadow = None
is_portal = None
max_bounces = None
samples = None
use_multiple_importance_sampling = None
| [
"troyedwardsjr@gmail.com"
] | troyedwardsjr@gmail.com |
66714611f024ba03584d28323952e6ecab755f56 | d5d94c992d0596080ba694c518dfdb58d3490847 | /0017/my_answer.py | d10ac9df9d6c0541772faa4ae39cc960e14d9e31 | [] | no_license | calgagi/leetcode | 1bf24b750e44c2c893935983e5d88e0f071d9f2d | 431aba979d92e331f2f92a07eb80167a823a49bd | refs/heads/master | 2022-11-17T11:26:01.596496 | 2020-07-19T06:56:04 | 2020-07-19T06:56:04 | 276,207,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | class Solution:
def letterCombinations(self, digits: str) -> List[str]:
self.nums = ["", "", "abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"]
self.r = []
if len(digits) == 0:
return self.r
self.backtrack(digits, "")
return self.r
def backtrack(self, digits: str, curr: str) -> None:
if len(digits) == 0:
self.r.append(curr)
return
for c in self.nums[int(digits[0])]:
self.backtrack(digits[1:], curr + c)
| [
"calgagi@gmail.com"
] | calgagi@gmail.com |
406be3af63c84a41d0e928882a6e6c507a54bd59 | 3b1242e49b72ecc5d7e80c2a0d833a5876affd29 | /src/crs_talker.py | 3f79150be2dedc84a0739e3acfd4766f845a4347 | [] | no_license | raysmith619/queen_bee_net | 09a29ffa11b9c12cc53506d9b7b7c3d7bad35854 | 6aaf8bf539271d57cb501d58a3ff41eabc0e89c7 | refs/heads/main | 2023-01-08T05:00:25.755818 | 2020-11-03T22:42:39 | 2020-11-03T22:42:39 | 303,788,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,631 | py | # crs_talker.py 06Oct2020 crs, Paired down from crs_drone_bee, drone_bee
"""
UDP - chat client (drone bee)
Usage: <pgm> [hostname/ip]
default: localhost
"our_name": uses gethostname()
Uses tkinter to graphically display talker with
messages from other talkers
Sends messages to host
"""
import socket
import sys
import os
import tkinter as tk
# --- Configuration and UDP socket setup (runs at import time) ---
port = 12345  # com port (default; may be overridden below)
sock_timeout = 5  # seconds to wait for a server reply

print(sys.argv)
pgm = sys.argv[0]
our_name = socket.gethostname()
our_ip = socket.gethostbyname(our_name)
print(f"{pgm}\n run on {our_name} {our_ip}")

# Optional qb.txt file supplies "host port" on one line.
qbinfo = "qb.txt"  # Get host, info from file
if os.path.exists(qbinfo):
    with open(qbinfo) as qbif:
        qbinfo_line = qbif.read()
    host, port = qbinfo_line.split()
    port = int(port)

# Command-line arguments override the file: argv[1]=host, argv[2]=port.
# NOTE(review): if qb.txt is absent AND no argv[1] is given, ``host`` is
# never assigned and gethostbyname(host) below raises NameError -- confirm
# whether a default host was intended.
if len(sys.argv) > 1:
    host_name = sys.argv[1]
    if host_name == "our_name":
        host_name = our_name  # Used internal name
    host = host_name
if len(sys.argv) > 2:
    port = sys.argv[2]
    port = int(port)

host_ip = socket.gethostbyname(host)
print(f"host:{host} IP: {host_ip} port: {port}")

# UDP (datagram) socket shared by the send/receive helpers below.
family = socket.AF_INET
sock_type = socket.SOCK_DGRAM
sock = socket.socket(family, sock_type)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(sock_timeout)

print("Press the space key to Enter msg")
def print_if_msg():
    """Poll the UDP socket briefly and print any pending chat message.

    Temporarily shortens the socket timeout to ~10 ms for the poll and
    always restores the normal timeout afterwards.
    """
    ck_time = .01
    try:
        sock.settimeout(ck_time)
        rep_data, rep_addr = sock.recvfrom(1024)
        rep_msg = rep_data.decode("utf-8")
        # Messages are "<sender> <text>"; split off the sender token.
        from_loc, msg = rep_msg.split(maxsplit=1)
        print(f"{from_loc}({rep_addr}): {msg}")
    finally:
        sock.settimeout(sock_timeout)
    # NOTE(review): there is no except clause, so socket.timeout propagates
    # when no datagram is waiting -- confirm callers expect that.
# Tk root window; msg_entry is bound once the widgets are built below.
mw = tk.Tk()
msg_entry = None  # Set when ready
def msg_proc():
    """Send the text in the entry box to the server and print the reply.

    Reads the current entry text, sends it as a UTF-8 datagram to
    (host, port), then waits up to sock_timeout seconds for a reply.
    """
    print("entry change")
    # Bug fix: ``msg_entry.get`` is a bound method and must be *called* --
    # the original passed the method object to bytes(), which raises
    # TypeError instead of sending the typed message.
    msg = msg_entry.get()
    data = bytes(msg, "utf-8")
    sock.sendto(data, (host, port))  # Sending message to UDP server
    sock.settimeout(sock_timeout)
    try:
        rep_data, rep_addr = sock.recvfrom(1024)
        rep_msg = rep_data.decode("utf-8")
        print(f"\tReply:{rep_msg} from {rep_addr}")
    except OSError:
        # socket.timeout is an OSError subclass; narrowed from a bare
        # except so unrelated errors (e.g. KeyboardInterrupt) propagate.
        print(f"\tNo reply after {sock_timeout} seconds")
# --- Build the chat window and enter the Tk event loop ---
ml_frame = tk.Frame(mw)
ml_frame.pack(side=tk.TOP)
ml_label = tk.Label(ml_frame, text="Enter Message")
ml_label.pack(side = tk.LEFT)
# NOTE(review): the Send button has no command= callback, so msg_proc is
# never wired up -- confirm whether a binding is missing.
ml_send_btn = tk.Button(ml_frame, text="Send")
ml_send_btn.pack(side=tk.RIGHT)
msg_entry = tk.Entry(mw, bd =5)
msg_entry.pack(side = tk.TOP)
msg_label = tk.Label(mw, text="From Others")
msg_label.pack(side = tk.TOP)
# NOTE(review): this second Entry rebinds msg_entry, shadowing the one
# created above -- presumably one was meant for incoming messages; verify.
msg_entry = tk.Entry(mw, bd=5)
msg_entry.pack(side=tk.TOP)

tk.mainloop()
| [
"noreply@github.com"
] | raysmith619.noreply@github.com |
2e1cf3b0394156d2a894a0b65fc8e26942c7dcd1 | 58e0f9966c896e9559486e104246cb7b87814d4f | /server/src/uds/transports/SPICE/scripts/macosx/tunnel.py | fd402709740044d7ee414e9d2f500f3b9c6ca17c | [] | no_license | hanwoody/openuds | 53d02d9274663f05b2fbba532d5cd5ba0e63d229 | 6868e471c564b7153e0787fd9dcf67afa489472f | refs/heads/master | 2023-01-14T16:10:33.121913 | 2020-11-20T13:40:18 | 2020-11-20T13:40:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | # This is a template
# Saved as .py for easier editing
from __future__ import unicode_literals
# pylint: disable=import-error, no-name-in-module, undefined-variable
import os
import subprocess
from uds import tools # @UnresolvedImport
from uds.forward import forward # @UnresolvedImport
# Path of the macOS virt-viewer binary this connector launches.
remoteViewer = '/Applications/RemoteViewer.app/Contents/MacOS/RemoteViewer'

if not os.path.isfile(remoteViewer):
    # The HTML below is rendered by the UDS client as the error dialog.
    raise Exception('''<p>You need to have installed virt-viewer to connect to this UDS service.</p>
<p>
    Please, install appropriate package for your system.
</p>
<p>
    <a href="http://people.freedesktop.org/~teuf/spice-gtk-osx/dmg/0.5.7/RemoteViewer-0.5.7-1.dmg">Open download page</a>
</p>
<p>
    Please, note that in order to UDS Connector to work correctly, you must copy the Remote Viewer app to your Applications Folder.<br/>
    Also remember, that in order to allow this app to run on your system, you must open it one time once it is copied to your App folder
</p>
''')

# ``sp`` is presumably a dict of connection parameters injected by the UDS
# client before this template is executed -- TODO confirm its schema.
# as_file_ns: .vv file template for the non-secure connection.
theFile = sp['as_file_ns']
if sp['port'] != '-1':
    # Forward the plain SPICE port through the SSH tunnel.
    forwardThread1, port = forward(sp['tunHost'], sp['tunPort'], sp['tunUser'], sp['tunPass'], sp['ip'], sp['port'])

    if forwardThread1.status == 2:
        raise Exception('Unable to open tunnel')
else:
    port = -1

if sp['secure_port'] != '-1':
    # Secure (TLS) port requested: switch to the secure file template.
    theFile = sp['as_file']
    if port != -1:
        # Reuse the first tunnel's credentials/session for the second port.
        forwardThread2, secure_port = forwardThread1.clone(sp['ip'], sp['secure_port'])
    else:
        forwardThread2, secure_port = forward(sp['tunHost'], sp['tunPort'], sp['tunUser'], sp['tunPass'], sp['ip'], sp['secure_port'])

    if forwardThread2.status == 2:
        raise Exception('Unable to open tunnel')
else:
    secure_port = -1

# Substitute the local tunnel ports into the .vv file and launch the viewer.
theFile = theFile.format(
    secure_port=secure_port,
    port=port
)

filename = tools.saveTempFile(theFile)
subprocess.Popen([remoteViewer, filename])
| [
"dkmaster@dkmon.com"
] | dkmaster@dkmon.com |
03a630af197f2b420553a89fa48f12c265d3e2c6 | a41e1498e3c080f47abd8e8e57157548df3ebbf1 | /pandas/_libs/json.pyi | bc4fe68573b94290cfba2e66950c1b6d45ccf0dc | [
"BSD-3-Clause"
] | permissive | pandas-dev/pandas | e7e639454a298bebc272622e66faa9829ea393bb | c7325d7e7e77ecb4a4e57b48bc25265277c75712 | refs/heads/main | 2023-09-01T12:42:07.927176 | 2023-09-01T11:14:10 | 2023-09-01T11:14:10 | 858,127 | 36,166 | 18,728 | BSD-3-Clause | 2023-09-14T21:18:41 | 2010-08-24T01:37:33 | Python | UTF-8 | Python | false | false | 496 | pyi | from typing import (
Any,
Callable,
)
# Typing stub for the C-accelerated JSON encoder (vendored ujson) exposed
# by pandas._libs.json; all defaults live in the C implementation, hence
# the ``...`` placeholders.
def ujson_dumps(
    obj: Any,
    ensure_ascii: bool = ...,
    double_precision: int = ...,
    indent: int = ...,
    orient: str = ...,
    date_unit: str = ...,
    iso_dates: bool = ...,
    default_handler: None
    | Callable[[Any], str | float | bool | list | dict | None] = ...,
) -> str: ...
# Typing stub for the matching C-accelerated JSON decoder.
def ujson_loads(
    s: str,
    precise_float: bool = ...,
    numpy: bool = ...,
    dtype: None = ...,
    labelled: bool = ...,
) -> Any: ...
| [
"noreply@github.com"
] | pandas-dev.noreply@github.com |
4777ab6677fa53f20a528eeea798e0d34d0e52ce | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Jm4eKTENReSiQFw9t_2.py | 52ec433dfcf75a72f84bb522dfbafc075eb7e429 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | """
Create a function that takes a list of numbers `lst` and returns an **inverted
list**.
### Examples
invert_list([1, 2, 3, 4, 5]) ➞ [-1, -2, -3, -4, -5]
invert_list([1, -2, 3, -4, 5]) ➞ [-1, 2, -3, 4, -5]
invert_list([]) ➞ []
### Notes
* Don't forget to return the result.
* If you get stuck on a challenge, find help in the **Resources** tab.
* If you're _really_ stuck, unlock solutions in the **Solutions** tab.
"""
invert_list = lambda l:[-x for x in l]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
87e445fdfb8e230b20e39d5730c4626dd30bbd3b | b2d06ad8145cbfe92835d62899f004dc207ad1b5 | /docs/releasenotes/_ext/extralinks.py | 0c1c134bcccd87a16677ddb27e7235acf1f70e69 | [
"MIT"
] | permissive | reviewboard/ReviewBot | 8027a9eb308b8c01f6d47e0372f543beff655014 | b59b566e127b5ef1b08f3189f1aa0194b7437d94 | refs/heads/master | 2023-06-10T00:25:11.506154 | 2023-05-31T22:36:34 | 2023-05-31T22:36:34 | 3,355,797 | 110 | 26 | MIT | 2020-11-05T08:56:37 | 2012-02-04T22:20:19 | Python | UTF-8 | Python | false | false | 1,536 | py | """Sphinx plugins for special links in the Release Notes."""
from __future__ import unicode_literals
from docutils import nodes, utils
def setup(app):
    """Sphinx extension entry point.

    Registers the ``bugtracker_url`` config value (third argument marks
    it as affecting rebuilds) and the ``:bug:`` / ``:cve:`` inline roles.
    """
    app.add_config_value('bugtracker_url', '', True)
    app.add_role('bug', bug_role)
    app.add_role('cve', cve_role)
def bug_role(role, rawtext, text, linenum, inliner, options={}, content=[]):
    """Render ``:bug:`N``` as a link into the configured bug tracker.

    *text* must be a positive integer and ``bugtracker_url`` must contain
    a ``%s`` placeholder; otherwise a docutils error node is produced.
    """
    def _error(message):
        msg = inliner.reporter.error(message, line=linenum)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]

    try:
        bugnum = int(text)
    except ValueError:
        bugnum = 0
    if bugnum <= 0:
        return _error(
            'Bug number must be a number greater than or equal to 1; '
            '"%s" is invalid.' % text)

    bugtracker_url = inliner.document.settings.env.config.bugtracker_url
    if not bugtracker_url or '%s' not in bugtracker_url:
        return _error('bugtracker_url must be configured.')

    node = nodes.reference(rawtext, 'Bug #' + utils.unescape(text),
                           refuri=bugtracker_url % bugnum, **options)
    return [node], []
def cve_role(role, rawtext, text, linenum, inliner, options={}, content=[]):
    """Render ``:cve:`YYYY-NNNN``` as a link to the MITRE CVE entry."""
    url = 'http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-%s' % text
    label = 'CVE-' + utils.unescape(text)
    return [nodes.reference(rawtext, label, refuri=url, **options)], []
return [node], []
| [
"trowbrds@gmail.com"
] | trowbrds@gmail.com |
30f6dedd0154090c998e834c8d4659c17018e4ee | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/ME.py/A1.py | 1448c25b4be0d5ca606657bc0e3799fa716385d6 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 608 | py | ##
## PROBLEM SOLVING ALGORITHM
##
def solve(N):
    """Return the last number counted before all ten digits have appeared.

    Counts N, 2N, 3N, ... and tracks which decimal digits have been seen;
    returns the multiple at which every digit 0-9 has occurred, or the
    string "INSOMNIA" when N is 0 (no digit ever appears).
    """
    if N == 0:
        return "INSOMNIA"
    remaining = set("0123456789")
    multiple = 0
    while remaining:
        multiple += N
        remaining -= set(str(multiple))
    return multiple
##
## MAIN LOOP: read(from stdin) - solve - print(to stdout)
##
# Main loop: read the case count, then one N per case; print each answer
# in the required "Case #i: answer" format.
num_cases = int(input())
for case_number in range(1, num_cases + 1):
    N = int(input())
    print('Case #' + str(case_number) + ': ' + str(solve(N)))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
91e4b646687b8addbe5fbc3eef8e4b959ea6a397 | 11f473b6cf8aaf33152134f1862cdf2146980f92 | /fatiando/tests/mesher/test_tesseroid_mesh.py | bb53b6d6fd7cd0675dd8e4ae02b627077f427c24 | [
"BSD-3-Clause"
] | permissive | smudog/fatiando | 5856e061c15b066359b2ac5b2178d572b0f5ed3d | c4aec7a977be231217c23f4b767fea80171e1dd3 | refs/heads/master | 2021-01-20T09:42:29.594420 | 2016-12-19T10:46:11 | 2016-12-19T10:46:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | from ...mesher import TesseroidMesh
import numpy as np
def test_tesseroid_mesh_copy():
    """copy() must return an independent mesh preserving geometry and props."""
    original = TesseroidMesh((0, 1, 0, 2, 3, 0), (1, 2, 2))
    duplicate = original.copy()
    assert duplicate is not original
    assert original.celltype == duplicate.celltype
    assert original.bounds == duplicate.bounds
    assert original.dump == duplicate.dump
    # Physical properties added after the fact must survive a copy too.
    original.addprop('density', 3300 + np.zeros(original.size))
    duplicate = original.copy()
    assert np.array_equal(original.props['density'], duplicate.props['density'])
| [
"leouieda@gmail.com"
] | leouieda@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.