blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a66551f87eb36b1e691f96a0feabe209abb8312c | 09d564aaab98f72dce6585e78a0642c9fe3539f4 | /网站代码1.0/weixinweb/apps/mobile/models.py | 5dc9b1f7fcc96875d1c1a237baf574a30175f56f | [] | no_license | everydayxy/xy_py | 4b983b4bccc843602f1ea0b1d5ea9576119604bf | 08b314e7ecb10e13394aa93b92084c53596834f3 | refs/heads/master | 2020-04-03T08:52:44.729729 | 2019-09-20T15:05:35 | 2019-09-20T15:05:35 | 134,683,779 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,969 | py | # coding:utf8
from __future__ import unicode_literals
from django.db import models
from datetime import datetime
from django.contrib.auth.models import AbstractUser
class MobileInfo(models.Model):
    """Inventory record for a single test device and its borrow state."""
    # Login of the person currently holding the device (blank when free).
    current_user = models.CharField(verbose_name=u'当前正在使用的用户', max_length=20, blank=True, null=True)
    # Previous borrower, kept for hand-over history.
    last_user = models.CharField(verbose_name=u'上一次使用的用户', max_length=20, blank=True, null=True)
    # Human-readable device name.
    mobile_type = models.CharField(verbose_name=u'设备名称', max_length=50, blank=True, null=True)
    # Device category.
    type = models.CharField(verbose_name=u'设备分类', max_length=50, blank=True, null=True)
    # Device model number.
    number = models.CharField(verbose_name=u'设备型号', max_length=50, blank=True, null=True)
    # Device colour.
    color = models.CharField(verbose_name=u'设备颜色', max_length=20, blank=True, null=True)
    # Serial number; unique per device.
    sn = models.CharField(verbose_name=u'设备SN码', max_length=100, blank=True, null=True, unique=True)
    # Borrow state: 'use' (borrowed) or 'free' (available); defaults to free.
    mobile_state = models.CharField(verbose_name=u'手机状态', choices=(('use', '使用'), ('free', '空闲')), max_length=20, default=u'free')
    # Free-form notes.
    remark = models.TextField(verbose_name=u'备注', blank=True, null=True)
    # NOTE(review): default=datetime.now (the callable) is evaluated at row
    # creation time only; it is not an auto-updating per-save timestamp.
    update_time = models.DateTimeField(verbose_name=u'更新时间', default=datetime.now)

    class Meta:
        verbose_name = '设备借用记录'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        # Python 2 style representation (the file imports unicode_literals).
        return self.mobile_type
class OperationLog(models.Model):
    """Audit log entry recording one operation performed on a device."""
    # Login of the user who performed the operation.
    user = models.CharField(verbose_name=u'操作用户', max_length=20)
    # Short operation label. NOTE(review): the vocabulary of values is
    # defined by the callers, not constrained by this model.
    operation = models.CharField(verbose_name=u'操作', max_length=20)
    # Identifier of the entity the operation touched.
    entity = models.CharField(verbose_name=u'操作实体', max_length=100)
    # When the operation happened; defaults to row creation time.
    operation_date = models.DateTimeField(verbose_name=u'操作发生时间', default=datetime.now)

    class Meta:
        verbose_name = '设备借用日志记录'
        verbose_name_plural = verbose_name
| [
"everydayx@163.com"
] | everydayx@163.com |
7411b04ed6a0bff0bc657def68b6c9fa9cf7f0cb | 3fcbbc1a2a262ed3b8fc9d7183c81b5f09445a85 | /build/sicktoolbox/catkin_generated/pkg.installspace.context.pc.py | 9d2217b97ba5e955f2f3bb4b4691a6df7c83928d | [] | no_license | karry3775/SimulationWS | 0861071c7d1b0f0372dc3515983bf8e092b37d07 | 754336d88d40e8e7d9e677bedff37505df7eaf4d | refs/heads/master | 2021-05-24T12:01:15.763154 | 2020-04-16T02:33:22 | 2020-04-16T02:33:22 | 253,547,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context (values substituted from the CMake
# template above); edit the template, not this file.
CATKIN_PACKAGE_PREFIX = ""
# Semicolon-separated template values become lists; an empty substitution
# must yield [] rather than [''].
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lSickLD;-lSickLMS1xx;-lSickLMS2xx".split(';') if "-lSickLD;-lSickLMS1xx;-lSickLMS2xx" != "" else []
PROJECT_NAME = "sicktoolbox"
PROJECT_SPACE_DIR = "/home/kartik/Documents/gazebo_practice_ws/install"
PROJECT_VERSION = "1.0.104"
| [
"kartikprakash3775@gmail.com"
] | kartikprakash3775@gmail.com |
cee00c7369c093b7b1f812c0801539a249639e22 | 8775b2e1aaa8a31954d96710ebe6f53978ce2beb | /python/consistent-hashing/exampl3.py | a39f8deae6e7bca4fcced071e86408fe6c4c4202 | [] | no_license | bruceSz/learnToexcellent | 9933cd3c959fa6e5d234776a6d5e1cc45dde1bd3 | 24d6b795506d7c698a28514379a7dc48576817fd | refs/heads/master | 2016-09-06T08:23:29.738516 | 2014-12-29T09:19:01 | 2014-12-29T09:19:01 | 7,304,982 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | from bisect import bisect_left
from hashlib import md5
from struct import unpack_from
# Cluster sizes before/after adding one node, and the size of the id space.
NODE_COUNT = 100
NEW_NODE_COUNT = 101
DATA_ID_COUNT = 1000000
# First data id owned by each node when the id space is split into
# NODE_COUNT contiguous ranges.
# NOTE(review): Python 2 source (xrange and the print statement below),
# so '/' here is integer division.
node_range_starts = []
for node_id in xrange(NODE_COUNT):
    node_range_starts.append(DATA_ID_COUNT/
                             NODE_COUNT*node_id)
# Same partitioning after the node count grows by one.
new_node_range_starts = []
for new_node_id in xrange(NEW_NODE_COUNT):
    new_node_range_starts.append(DATA_ID_COUNT/
                                 NEW_NODE_COUNT*new_node_id)
# Count how many ids land on a different node after the resize.
moved_ids = 0
for data_id in xrange(DATA_ID_COUNT):
    data_id = str(data_id)
    # Map the id onto the space via the first 4 big-endian bytes of its MD5.
    hsh = unpack_from('>I',md5(data_id).digest())[0]
    # Owner before and after the node count change.
    node_id = bisect_left(node_range_starts,hsh%DATA_ID_COUNT)%NODE_COUNT
    new_node_id = bisect_left(new_node_range_starts,hsh%DATA_ID_COUNT)%NEW_NODE_COUNT
    if node_id != new_node_id:
        moved_ids += 1
# Share of ids whose owner changed when one node was added.
percent_moved = 100.0* moved_ids/DATA_ID_COUNT
print '%d ids moved, %.02f%%' %(moved_ids,percent_moved)
| [
"zsyuyizhang@gmail.com"
] | zsyuyizhang@gmail.com |
45db5890711c68a9a639683a4b75be40c00f7ef3 | eddbc7f562c394034fdcc80f2856691126473d7e | /toutvcli/progressbar.py | 64c47152b69c753359f029410025b13921bc2e72 | [] | no_license | eepp/pytoutv | 575dcf01f41be7202e57fe2669275eca57a543cd | 3be8763e473e462470a9044aa30b0220a2749fe9 | refs/heads/master | 2021-01-16T22:47:17.375958 | 2015-04-01T18:08:48 | 2015-04-01T18:08:48 | 30,171,148 | 0 | 0 | null | 2015-02-02T04:32:14 | 2015-02-02T04:32:13 | null | UTF-8 | Python | false | false | 4,680 | py | # Copyright (c) 2014, Philippe Proulx <eepp.ca>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of pytoutv nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Philippe Proulx BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import shutil
try:
from termcolor import colored
_has_termcolor = True
except ImportError:
_has_termcolor = False
class ProgressBar:
    """Renders a one-line download progress bar sized to the terminal.

    The line is built from five fixed-order widgets: file name, total
    size, segment counter, bar and percentage.
    """

    def __init__(self, filename, segments_count):
        """Remember the file name and the expected number of segments."""
        self._filename = filename
        self._segments_count = segments_count

    @staticmethod
    def _get_terminal_width():
        """Current terminal width in columns."""
        columns, _rows = shutil.get_terminal_size()
        return columns

    def _get_bar_widget(self, width):
        """Bracketed bar: '#' for downloaded segments, '-' for the rest."""
        usable = width - 2  # room taken by the surrounding brackets
        done = round(self._total_segments / self._segments_count * usable)
        todo = '-' * (usable - done)
        if _has_termcolor:
            todo = colored(todo, attrs=['dark'])
        return '[' + '#' * done + todo + ']'

    def _get_percent_widget(self, width):
        """Right-aligned integer percentage, e.g. ' 42%'."""
        ratio = self._total_segments / self._segments_count
        return ('%s%%' % int(ratio * 100)).rjust(width)

    def _get_segments_widget(self, width):
        """Right-aligned 'downloaded/total' segment counter."""
        counter = '%s/%s' % (self._total_segments, self._segments_count)
        return counter.rjust(width)

    def _get_size_widget(self, width):
        """Right-aligned human-readable byte count (B/kiB/MiB/GiB)."""
        nbytes = self._total_bytes
        if nbytes < (1 << 10):
            label = '%s B' % nbytes
        elif nbytes < (1 << 20):
            label = '%.1f kiB' % (nbytes / (1 << 10))
        elif nbytes < (1 << 30):
            label = '%.1f MiB' % (nbytes / (1 << 20))
        else:
            label = '%.1f GiB' % (nbytes / (1 << 30))
        return label.rjust(width)

    def _get_filename_widget(self, width):
        """Left-aligned file name, truncated with '...' when too long.

        Shown in bold (when termcolor is available) while the download
        is still incomplete.
        """
        if len(self._filename) < width:
            shown = self._filename.ljust(width)
        else:
            shown = self._filename[:width - 3] + '...'
        if _has_termcolor and self._total_segments != self._segments_count:
            shown = colored(shown, attrs=['bold'])
        return shown

    def get_bar(self, total_segments, total_bytes):
        """Build the full progress line for the given download state."""
        self._total_segments = total_segments
        self._total_bytes = total_bytes
        # Fixed-width widgets; the segment counter grows with the digit
        # count of the segment total.
        percent_width = 5
        size_width = 12
        segments_width = len(str(self._segments_count)) * 2 + 4
        padding = 1
        # Whatever terminal width is left is split 60/40 between the
        # file name and the bar.
        leftover = (ProgressBar._get_terminal_width()
                    - (percent_width + size_width + segments_width + padding))
        filename_width = round(leftover * 0.6)
        bar_width = leftover - filename_width
        return '%s%s%s %s%s' % (self._get_filename_widget(filename_width),
                                self._get_size_widget(size_width),
                                self._get_segments_widget(segments_width),
                                self._get_bar_widget(bar_width),
                                self._get_percent_widget(percent_width))
| [
"eeppeliteloop@gmail.com"
] | eeppeliteloop@gmail.com |
8327d8d856a104430f484664172a940ed1f53f66 | 7ff07fc1e37e73ee84fb57c99befc803b4370d13 | /grafana.py | 1468fe4ddcefbd6bb71a03cbd02f39f35669f78e | [] | no_license | Kundjanasith/sparksample_ssw2 | 5048965a53d31fb97e268c42c72ce45b6745cccc | 5d08418392bc69e5fc97c6e9220ac4c8dc7eb731 | refs/heads/master | 2021-08-05T14:06:00.714954 | 2017-10-31T09:46:27 | 2017-10-31T09:46:27 | 108,869,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,015 | py | from subprocess import call
import os
import sys
# Banner: prints the author's name via echo.
call(["echo","Kundjanasith Thonglek"])

# argv[1]: path of the Spark log file to parse.
# NOTE(review): 'file' shadows the builtin name and is never closed.
file = open(sys.argv[1], "r")
lines = []
for line in file:
    lines.append(line)

# Bucket log lines by their 4th whitespace token, keeping only the
# MemoryStore and BlockManagerInfo records.
# NOTE(review): assumes every line has at least 4 space-separated tokens;
# shorter lines would raise IndexError.
blockManagerInfo_list = []
memoryStore_list = []
for line in lines:
    l = line.split(" ")
    if l[3]=="MemoryStore:":
        memoryStore_list.append(l)
    if l[3]=="BlockManagerInfo:":
        blockManagerInfo_list.append(l)
def MB2KB(x):
    """Convert a Spark size string such as '1.5MB' or '512.0KB' to KB.

    Uses decimal units (1 MB = 1000 KB) to match the original behaviour.
    GB support is new: previously GB-sized blocks were silently reported
    as 0, which skewed the metrics pushed to InfluxDB.

    Args:
        x: Size string ending in 'GB', 'MB' or 'KB'.

    Returns:
        The size expressed in KB as a float; 0 for any other suffix so
        malformed log fields do not crash the parser.
    """
    unit = x[-2:]
    if unit == "GB":
        return float(x[:-2]) * 1000 * 1000
    if unit == "MB":
        return float(x[:-2]) * 1000
    if unit == "KB":
        return float(x[:-2])
    return 0
# argv[2]: CSV output for BlockManagerInfo used/free figures; each parsed
# event is also pushed to InfluxDB via the influx CLI.
file_b = open(sys.argv[2],"w")
file_b.write("USED,FREE\n")
for b in blockManagerInfo_list:
    if b[4]=="Added":
        print("Added")
        storage = b[7]
        print("STORAGE : "+storage)
        # Size/free are read from fixed token positions of the log line;
        # trailing punctuation is sliced off.
        size = (b[11]+b[12])[:-1]
        print("SIZE : "+size)
        free = (b[14]+b[15])[:-2]
        print("FREE : "+free)
        sql_used = "'insert b_used value="+str(MB2KB(size))+"'"
        sql_free = "'insert b_free value="+str(MB2KB(free))+"'"
        print(sql_used)
        print(sql_free)
        file_b.write(str(MB2KB(size))+","+str(MB2KB(free))+"\n")
        os.system("influx -host 203.185.71.2 -database sparkmon -execute "+sql_used)
        os.system("influx -host 203.185.71.2 -database sparkmon -execute "+sql_free)
    elif b[4]=="Removed":
        print("Removed")
        storage = b[9]
        print("STORAGE : "+storage)
        if storage == "disk":
            size = (b[11]+b[12])[:-2]
            print("SIZE : "+size)
            # NOTE(review): 'free' is not recomputed in this branch -- the
            # value left over from a previous iteration is reused, and a
            # NameError would occur if no 'Added'/'memory' event came first.
            sql_used = "'insert b_used value="+str(MB2KB(size))+"'"
            sql_free = "'insert b_free value="+str(MB2KB(free))+"'"
            print(sql_used)
            print(sql_free)
            file_b.write(str(MB2KB(size))+","+str(MB2KB(free))+"\n")
            os.system("influx -host 203.185.71.2 -database sparkmon -execute "+sql_used)
            os.system("influx -host 203.185.71.2 -database sparkmon -execute "+sql_free)
        if storage == "memory":
            size = (b[11]+b[12])[:-1]
            print("SIZE : "+size)
            free = (b[14]+b[15])[:-2]
            print("FREE : "+free)
            sql_used = "'insert b_used value="+str(MB2KB(size))+"'"
            sql_free = "'insert b_free value="+str(MB2KB(free))+"'"
            print(sql_used)
            print(sql_free)
            file_b.write(str(MB2KB(size))+","+str(MB2KB(free))+"\n")
            os.system("influx -host 203.185.71.2 -database sparkmon -execute "+sql_used)
            os.system("influx -host 203.185.71.2 -database sparkmon -execute "+sql_free)
    elif b[4]=="Updated":
        print("Updated")
        storage = b[7]
        print("STORAGE : "+storage)
        current = (b[12]+b[13])[:-1]
        print("CURRENT : "+current)
        original = (b[16]+b[17])[:-2]
        print("ORIGINAL : "+original)
        # NOTE(review): likely bug -- the SQL below reports the stale
        # 'size'/'free' values from an earlier event instead of the
        # freshly parsed 'current'/'original'.
        sql_used = "'insert b_used value="+str(MB2KB(size))+"'"
        sql_free = "'insert b_free value="+str(MB2KB(free))+"'"
        print(sql_used)
        print(sql_free)
        file_b.write(str(MB2KB(size))+","+str(MB2KB(free))+"\n")
        os.system("influx -host 203.185.71.2 -database sparkmon -execute "+sql_used)
        os.system("influx -host 203.185.71.2 -database sparkmon -execute "+sql_free)
    else:
        print("ERROR")
# argv[3]: CSV output for MemoryStore used/free figures; each 'Block'
# event is also pushed to InfluxDB via the influx CLI.
file_m = open(sys.argv[3],"w")
file_m.write("USED,FREE\n")
for m in memoryStore_list:
    if m[4]=="Block":
        print("Block")
        # Size/free are read from fixed token positions of the log line;
        # trailing punctuation is sliced off.
        size = (m[13]+m[14])[:-1]
        print("SIZE : "+size)
        free = (m[16]+m[17])[:-2]
        print("FREE : "+free)
        sql_used = "'insert m_used value="+str(MB2KB(size))+"'"
        sql_free = "'insert m_free value="+str(MB2KB(free))+"'"
        print(sql_used)
        print(sql_free)
        file_m.write(str(MB2KB(size))+","+str(MB2KB(free))+"\n")
        os.system("influx -host 203.185.71.2 -database sparkmon -execute "+sql_used)
        os.system("influx -host 203.185.71.2 -database sparkmon -execute "+sql_free)
    elif m[4]=="MemoryStore":
        # Startup/shutdown lines of the MemoryStore itself: logged only.
        print("MemoryStore")
    else:
        print("ERROR")
| [
"you@example.com"
] | you@example.com |
30d4e85e1077e8d42e2e9c6a0ffe0e90a0fd137f | 2876a5a8e7d50d97039b4e63c25f5eaf1cc20808 | /src/odontology/register/urls.py | f2530fce308de54e12a5780202808bb1810d1fda | [
"Apache-2.0"
] | permissive | nanomolina/JP | 6fcd01b75d71aa560781d4c0350ff76025f85f92 | 248a47bced4dac850f85d28968ddf279cd123400 | refs/heads/master | 2022-11-29T09:31:43.449654 | 2019-07-16T18:25:20 | 2019-07-16T18:25:20 | 51,620,989 | 2 | 0 | Apache-2.0 | 2022-11-22T01:11:25 | 2016-02-12T22:33:24 | HTML | UTF-8 | Python | false | false | 1,226 | py | from django.conf.urls import url
from . import views
# Routes of the 'register' app. All ids in the paths are numeric PKs.
urlpatterns = [
    # Benefit lifecycle for a patient: create, edit, edit one detail row,
    # and export a benefit as PDF.
    url(r'^benefit/new/(?P<patient_id>[0-9]+)/$', views.new_benefit, name='new_benefit'),
    url(r'^benefit/edit/(?P<patient_id>[0-9]+)/$', views.edit_benefit, name='edit_benefit'),
    url(r'^benefit_detail/(?P<patient_id>[0-9]+)/edit/(?P<detail_id>[0-9]+)/$', views.edit_benefit_detail, name='edit_benefit_detail'),
    url(r'^benefit/(?P<patient_id>[0-9]+)/to_pdf/(?P<bf_id>[0-9]+)/$', views.benefit_to_pdf, name='benefit_to_pdf'),
    # Odontogram and accumulated-benefit views per patient.
    url(r'^odontogram/edit/(?P<patient_id>[0-9]+)/$', views.edit_odontogram, name='edit_odontogram'),
    url(r'^acumulate_benefit/(?P<patient_id>[0-9]+)/$', views.acumulate_benefit, name='acumulate_benefit'),
    # Radiography attached to a specific benefit.
    url(r'^radiography/(?P<patient_id>[0-9]+)/edit/(?P<bf_id>[0-9]+)/$', views.edit_radiography, name='edit_radiography'),
    # Patient record CRUD plus its account sub-view.
    url(r'^record/new/(?P<patient_id>[0-9]+)/$', views.new_record, name='new_record'),
    url(r'^record/edit/(?P<record_id>[0-9]+)/$', views.edit_record, name='edit_record'),
    url(r'^record/remove/(?P<record_id>[0-9]+)/$', views.remove_record, name='remove_record'),
    url(r'^record/account/edit/(?P<record_id>[0-9]+)/$', views.edit_record_account, name='edit_record_account'),
]
| [
"nanomolinacav@gmail.com"
] | nanomolinacav@gmail.com |
c655aa65ce4def25a201e67dd41360bd9c307f25 | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2015_07_01/operations/_metric_definitions_operations.py | ebae6587c5bd6430c0d29a62dcb999ab6c17ec2f | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 6,020 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class MetricDefinitionsOperations(object):
    """MetricDefinitionsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~$(python-base-namespace).v2015_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Auto-generated operation group (AutoRest); edits here are normally
    # lost on regeneration.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_uri,  # type: str
        filter=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.MetricDefinitionCollection"]
        """Lists the metric definitions for the resource.
        :param resource_uri: The identifier of the resource.
        :type resource_uri: str
        :param filter: Reduces the set of data collected by retrieving particular metric definitions
         from all the definitions available for the resource.:code:`<br>`For example, to get just the
         definition for the 'CPU percentage' counter: $filter=name.value eq '\Processor(_Total)\%
         Processor Time'.:code:`<br>`Multiple metrics can be retrieved by joining together *'name eq
         :code:`<value>`'* clauses separated by *or* logical operators.:code:`<br>`\ **NOTE**\ : No
         other syntax is allowed.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either MetricDefinitionCollection or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2015_07_01.models.MetricDefinitionCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.MetricDefinitionCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-07-01"
        accept = "application/json"

        # First call builds the templated URL with query parameters;
        # subsequent pages reuse the server-supplied next_link verbatim.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        # Deserializes one page; returns (continuation_token, iterator).
        # The token is always None because paging is driven by next_link.
        def extract_data(pipeline_response):
            deserialized = self._deserialize('MetricDefinitionCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)

        # Fetches one page and fails fast on any non-200 status.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/metricDefinitions'}  # type: ignore
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
4b860b7547cdff2cb7948a03bb0453c4ebd0d485 | 29466643ce80aa5e5e6868ec836ab71c8d35f0e1 | /exampleproject/tests/test_theme_loaders.py | 142cb0040dda880e578baa67bcc5f1e45b8c55e7 | [
"MIT"
] | permissive | priestd09/django-vest | 80a59d1e87e5570dc67aa529c65ab87b219f424e | 284acb70cce53be6653ef26e6133f6ce09129051 | refs/heads/master | 2021-01-18T07:29:25.471461 | 2015-06-21T15:22:54 | 2015-06-21T15:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,331 | py | # coding: utf-8
from django.test import override_settings
from django.contrib.auth import get_user_model
from django.template.base import TemplateDoesNotExist
from django.core.urlresolvers import reverse
from django.utils.text import force_text
from django_vest.test import TestCase
from django_vest.templates_loaders import DJANGO_ORIGIN
class TemplateLoaderTestCase(TestCase):
    """Checks theme resolution performed by the theme template loader."""

    def _fetch_index(self):
        """GET the index page, assert 200 and return the template names used."""
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        return get_templates_used(response)

    @override_settings(CURRENT_THEME='main_theme', DEFAULT_THEME='main_theme')
    def test_default_theme(self):
        """Default theme: index.html is rendered with no parent template."""
        used = self._fetch_index()
        self.assertEqual(len(used), 1)
        self.assertIn('index.html', used)

    @override_settings(CURRENT_THEME='dark_theme', DEFAULT_THEME='main_theme')
    def test_dark_theme(self):
        """Secondary theme: index.html extends the default theme's copy."""
        used = self._fetch_index()
        self.assertEqual(len(used), 2)
        self.assertIn('index.html', used)
        self.assertIn('DEFAULT_THEME/index.html', used)

    @override_settings(CURRENT_THEME='unknow', DEFAULT_THEME='main_theme')
    def test_unknow_current_theme(self):
        """An invalid CURRENT_THEME falls back to the default theme."""
        used = self._fetch_index()
        self.assertEqual(len(used), 1)
        self.assertIn('index.html', used)

    @override_settings(CURRENT_THEME='unknow', DEFAULT_THEME='unknow')
    def test_unknow_all_themes(self):
        """When neither theme name resolves, rendering fails loudly."""
        with self.assertRaises(TemplateDoesNotExist):
            self.client.get('/')

    @override_settings(CURRENT_THEME=None, DEFAULT_THEME=None)
    def test_themes_not_set(self):
        """Unset theme settings behave like unknown theme names."""
        with self.assertRaises(TemplateDoesNotExist):
            self.client.get('/')
class AppsTemplateLoaderTestCase(TestCase):
    """Tests for `django_vest.template_loaders.AppsLoader`."""

    @classmethod
    def setUpClass(cls):
        """Create one superuser and resolve the admin changelist URL."""
        super(AppsTemplateLoaderTestCase, cls).setUpClass()
        cls.User = get_user_model()
        cls.username = cls.password = 'user'
        cls.email = 'user@users.com'
        cls.user = cls.User.objects.create_superuser(
            cls.username, cls.email, cls.password)
        cls.url = reverse('admin:auth_user_changelist')

    def setUp(self):
        self.client.login(username=self.username, password=self.password)

    def test_override_origin_template(self):
        """The django-origin admin template is replaced by the themed one."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        rendered = ','.join(get_templates_used(response))
        self.assertIn(DJANGO_ORIGIN, rendered)
        body = force_text(response.content)
        self.assertIn(u'Template has been overridden', body)
def get_templates_used(response):
    """Names of the templates rendered for *response*, skipping unnamed ones."""
    names = []
    for template in response.templates:
        if template.name is not None:
            names.append(template.name)
    return names
| [
"zero13cool@yandex.ru"
] | zero13cool@yandex.ru |
679f8136203aa13c9c009d2734ad216682687690 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4476/codes/1723_2506.py | fde97dfe81580c09c0a2b8c82cf67437628af87f | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | qit = int(input("digite o valor: "))
# Growth-rate percentage and yearly removal; 'qit' (the starting stock)
# is read just above this block.
cat = int(input("digite o valor: "))
qtv = int(input("digite o valor: "))
fator = cat / 100
anos = 0
# Simulate year by year while the stock stays strictly between the
# extinction floor (0) and the ceiling (12000).
while 0 < qit < 12000:
    qit = qit + qit * fator - qtv
    anos = anos + 1
# Report which boundary stopped the simulation, then the elapsed years.
if qit < 0:
    print("EXTINCAO")
if qit > 12000:
    print("LIMITE")
print(anos)
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
87868c3a5b35301410c73fce0c952c6a4f7d335d | 7d90019c8f480a4dd65202a901b37dae1c1f6064 | /nikl_m.py | 7c5149878ada36da2c36520464955fbbf071a67c | [
"MIT"
] | permissive | r9y9/deepvoice3_pytorch | d3e85f54d46e809f6fffc0d619e0b4a9d1b13488 | f90255c96177c344cd18b5a52651b420a4d8062d | refs/heads/master | 2023-08-23T08:00:32.174896 | 2023-06-29T18:32:26 | 2023-06-29T18:32:26 | 108,992,863 | 1,964 | 511 | NOASSERTION | 2023-08-11T16:51:15 | 2017-10-31T12:31:44 | Python | UTF-8 | Python | false | false | 3,408 | py | from concurrent.futures import ProcessPoolExecutor
from functools import partial
import numpy as np
import os
import audio
import re
from hparams import hparams
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
    """Preprocess the NIKL multi-speaker corpus rooted at *in_dir*.

    Reads speaker ids from ``speaker.mid`` and utterance entries from
    ``metadata.txt`` (``wav_path|text`` per line), then extracts
    spectrogram features for every utterance in a process pool.

    Args:
        in_dir: Directory holding ``speaker.mid`` and ``metadata.txt``.
        out_dir: Directory the spectrogram ``.npy`` files are written to.
        num_workers: Size of the worker process pool.
        tqdm: Optional progress-bar wrapper applied to the futures list.

    Returns:
        A list of per-utterance tuples, suitable for writing train.txt.
    """
    pool = ProcessPoolExecutor(max_workers=num_workers)
    # Map each speaker id (two letters + two digits in the wav path) to
    # its zero-based line number in speaker.mid.
    speaker_index = {}
    with open(in_dir + '/speaker.mid', encoding='utf-8') as mid_file:
        for row, raw in enumerate(mid_file):
            speaker_index[raw.rstrip()] = row
    jobs = []
    with open(in_dir + '/metadata.txt', encoding='utf-8') as meta_file:
        # Utterance numbering starts at 2, matching the original
        # 'index + 1' scheme with index counting from 1.
        for utt_no, raw in enumerate(meta_file, start=2):
            parts = raw.strip().split('|')
            wav_path = parts[0]
            text = parts[1]
            match = re.search(r'([a-z][a-z][0-9][0-9]_t)', wav_path)
            speaker = match.group(1).replace('_t', '')
            jobs.append(pool.submit(partial(
                _process_utterance, out_dir, utt_no,
                speaker_index[speaker], wav_path, text)))
    return [job.result() for job in tqdm(jobs)]
def _process_utterance(out_dir, index, speaker_id, wav_path, text):
    """Compute and store the spectrograms for one utterance.

    Writes ``nikl-multi-spec-XXXXX.npy`` (linear spectrogram) and
    ``nikl-multi-mel-XXXXX.npy`` (mel spectrogram) into *out_dir*.

    Args:
        out_dir: Target directory for the ``.npy`` files.
        index: Numeric index embedded in the output file names.
        speaker_id: Integer id of the utterance's speaker.
        wav_path: Path of the input audio file.
        text: Transcription spoken in the audio.

    Returns:
        ``(spec_filename, mel_filename, n_frames, text, speaker_id)``,
        one row to be written to train.txt.
    """
    wav = audio.load_wav(wav_path)
    # Optional peak normalisation controlled by the hyper parameters.
    if hparams.rescaling:
        wav = wav / np.abs(wav).max() * hparams.rescaling_max
    # Linear-scale spectrogram first; its frame count is what callers use.
    spec = audio.spectrogram(wav).astype(np.float32)
    n_frames = spec.shape[1]
    # Mel-scale spectrogram from the same (possibly rescaled) waveform.
    mel = audio.melspectrogram(wav).astype(np.float32)
    spec_filename = 'nikl-multi-spec-%05d.npy' % index
    mel_filename = 'nikl-multi-mel-%05d.npy' % index
    np.save(os.path.join(out_dir, spec_filename), spec.T, allow_pickle=False)
    np.save(os.path.join(out_dir, mel_filename), mel.T, allow_pickle=False)
    return (spec_filename, mel_filename, n_frames, text, speaker_id)
| [
"zryuichi@gmail.com"
] | zryuichi@gmail.com |
5cdd88f7c21c0017de1a55407ca223f3307271ac | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_286/ch33_2020_03_20_19_48_52_677032.py | f93bf527b67821368199a82586a0a894c3c90081 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | def eh_primo (numero):
if numero == 1 or numero == 0:
return False
elif numero == 2:
return True
else:
div = 2
cont = 0
while div < numero:
if numero % div == 0:
cont += 1
div += 1
if cont == 0:
return True
else:
return False
def lista_primos(valor_max):
    """Return all primes p with p <= valor_max, in increasing order.

    Uses a sieve of Eratosthenes (O(n log log n)) instead of the previous
    per-number trial division, which was O(n^2) overall. Results are
    identical: 1 and 0 are never primes, so valor_max < 2 yields [].

    Args:
        valor_max: Inclusive upper bound of the range to scan.

    Returns:
        List of primes up to valor_max; empty when valor_max < 2.
    """
    if valor_max < 2:
        return []
    eh_composto = [False] * (valor_max + 1)
    primos = []
    for candidato in range(2, valor_max + 1):
        if not eh_composto[candidato]:
            primos.append(candidato)
            # Mark multiples starting at candidato^2; smaller multiples
            # were already marked by smaller primes.
            for multiplo in range(candidato * candidato, valor_max + 1, candidato):
                eh_composto[multiplo] = True
    return primos
def primos_entre(menor, maior):
    """Return the primes p satisfying menor <= p <= maior."""
    return [primo for primo in lista_primos(maior) if primo >= menor]
"you@example.com"
] | you@example.com |
898dfad221e5ead2cf716da8027383746d0d5bd4 | 3d8141cded162c3ccd7273e979b1ba60eceab4c7 | /test/reports/common-games/User/test_report_user_today_for_the_week.py | 98ba12bdcd770040c7ac59f8ab8ac236e584fcb5 | [
"Apache-2.0"
] | permissive | FearFactor1/S3 | 563e2de6de03b284367fa52da25753156aaa8b4a | 2c94197a9e6f23ca5a47815b4e80541074440c03 | refs/heads/master | 2021-06-25T05:32:28.658247 | 2020-11-16T07:51:46 | 2020-11-16T07:51:46 | 164,873,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | # Отчет за неделю + с галочкой Кассовый отчёт + Обычные + Пользователь + Текущая неделя
def test_report_user_today_for_the_week(app):
app.report.open_page_report()
app.report.select_user()
app.report.select_checkbox_for_the_week()
app.report.button_get_report()
app.report.parser_report_text()
assert "ОТЧЕТ ЗА НЕДЕЛЮ" in app.report.parser_report_text()
assert "ИТОГИ ПО ПОЛЬЗОВАТЕЛЮ" in app.report.parser_report_text()
assert "Продавец: 2000006809-0020003510" in app.report.parser_report_text()
assert " Пользователь :0020003510" in app.report.parser_report_text()
assert app.report.beginning_of_the_week_C() in app.report.parser_report_text()
assert app.report.current_day_Po() in app.report.parser_report_text()
assert 'Продажи' in app.report.parser_report_text()
assert 'Отмены' in app.report.parser_report_text()
assert 'Выплаты' in app.report.parser_report_text()
assert 'ИТОГО ПО КАССЕ' in app.report.parser_report_text()
app.report.comeback_main_page() | [
"zelezodoroznik@yandex.ru"
] | zelezodoroznik@yandex.ru |
338acbb4fa83e8925fd45e166d1ef3e96095dd3a | c2b7f6e19d988adef4b02ac99fb4387a43bd5162 | /forml/project/__init__.py | 8730941291d7f2caf8e85eae710a3df696855c61 | [
"Apache-2.0"
] | permissive | hieuqtran/forml | 3fad180b071413bae22d419634da78bfa73f0187 | 7a63ef9031c0b5ac567462782d2241f7fd51a11a | refs/heads/master | 2023-01-21T11:25:37.417186 | 2020-12-04T23:24:49 | 2020-12-04T23:24:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Project setup mechanics.
"""
import pathlib
import typing
from forml.project import product
def open( # pylint: disable=redefined-builtin
path: typing.Optional[typing.Union[str, pathlib.Path]] = None,
package: typing.Optional[str] = None,
**modules: typing.Any,
) -> product.Artifact:
"""Shortcut for getting a product artifact.
Args:
path: Filesystem path to a package root.
package: Package name.
**modules: Project module mappings.
Returns:
Product artifact.
"""
return product.Artifact(path, package, **modules)
| [
"antonymayi@yahoo.com"
] | antonymayi@yahoo.com |
48f7491e0c3f2abe9255a379d4725fbb095e40cf | 9a393d5dae8147088b1c9b78987197c60a6618cf | /0909/트리순회1.py | 02d36f043d93d356d21d9beda48f0381e6a02dcc | [] | no_license | bumbum9944/bumpycharm | 5444440379f6d5142130bc8a7a4b69276f23f991 | b487eb433d41ff0d2f6e1ca4f723225b114b96c0 | refs/heads/master | 2020-07-05T16:04:35.153231 | 2019-10-02T00:14:00 | 2019-10-02T00:14:00 | 202,693,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | def preorder(n):
if n > 0:
print(n, end=' ')
preorder(ch1[n])
preorder(ch2[n])
def inorder(n):
if n > 0:
inorder(ch1[n])
print(n, end=' ')
inorder(ch2[n])
def posorder(n):
if n > 0:
posorder(ch1[n])
posorder(ch2[n])
print(n, end=' ')
def f(n): # n의 조상 출력하기
while(par[n] != 0): # n의 부모가 있으면
print(par[n], end=' ')
n = par[n]
V = int(input()) # 간선의 수 : V - 1 (정점 수보다 하나 적음)
E = V - 1
t = list(map(int, input().split()))
ch1 = [0] * (V+1) # 부모를 인덱스로 자식 저장
ch2 = [0] * (V+1)
par = [0] * (V+1) # 자식을 인덱스로 부모를 저장
for i in range(E):
p = t[2 * i]
c = t[2 * i + 1]
if ch1[p] == 0: # 아직 ch1 자식이 없으면
ch1[p] = c
else:
ch2[p] = c
par[c] = p
preorder(1)
print()
inorder(1)
print()
posorder(1)
print()
f(13) | [
"tong940526@gmail.com"
] | tong940526@gmail.com |
33175511ba6040b0a0a7f5b576ace6024b3d7774 | e4f1f60c587fadab2af3082836b559f981a74015 | /pcmdpy/instrument/__init__.py | 489c38c3b739020eb79f9380e8dfc5e9d2a1c473 | [
"MIT"
] | permissive | bacook17/pcmdpy | bb2cd4b224f6a7cad5ca638a94f8494945404c6a | ce2e9341efb1846e8c6c8bac27208603591ec525 | refs/heads/master | 2021-06-04T09:49:21.414770 | 2019-08-13T17:39:48 | 2019-08-13T17:39:48 | 113,083,573 | 7 | 2 | MIT | 2023-06-27T04:45:28 | 2017-12-04T19:09:52 | Batchfile | UTF-8 | Python | false | false | 1,264 | py | __all__ = ['Filter', 'PSF_Model', 'ACS_WFC_F435W', 'ACS_WFC_F475W',
'ACS_WFC_F555W', 'ACS_WFC_F606W', 'ACS_WFC_F814W', 'ACS_WFC_F850LP',
'm31_filter_sets', 'm49_filter_sets', 'm51_filter_sets',
'default_m31_filters', 'default_m49_filters', 'default_m51_filters',
'default_m87_filters', 'default_ngc3377_filters',
'default_df2_filters', 'default_ngc4993_filters',
'm87_filters_v2',
# 'lowexp_m87_filters',
'AVAILABLE_FILTERS', 'm31_narrow_psf',
'm31_summer_filters', 'm31_winter_filters']
from .filter import (Filter, ACS_WFC_F435W, ACS_WFC_F475W, ACS_WFC_F555W,
ACS_WFC_F606W, ACS_WFC_F814W, ACS_WFC_F850LP,
m31_filter_sets, m49_filter_sets, m51_filter_sets,
default_m31_filters, default_m49_filters,
default_m51_filters, default_m87_filters,
m87_filters_v2,
# lowexp_m87_filters,
default_ngc3377_filters, default_ngc4993_filters,
default_df2_filters,
m31_narrow_psf, m31_winter_filters, m31_summer_filters,
AVAILABLE_FILTERS)
from .psf import PSF_Model
| [
"bcook@cfa.harvard.edu"
] | bcook@cfa.harvard.edu |
d4abfa11594790ffa97ecee62b00d8d0ac32e299 | df2cbe914f463ad050d7ed26194424afbe3a0a52 | /addons/snailmail_account/wizard/snailmail_confirm_invoice_send.py | a3c3da40e8ce82168209c1cd0c4d37bf1a3167ef | [
"Apache-2.0"
] | permissive | SHIVJITH/Odoo_Machine_Test | 019ed339e995be980606a2d87a63312ddc18e706 | 310497a9872db7844b521e6dab5f7a9f61d365a4 | refs/heads/main | 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 | Apache-2.0 | 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null | UTF-8 | Python | false | false | 585 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class SnailmailConfirmInvoiceSend(models.TransientModel):
_name = 'snailmail.confirm.invoice'
_inherit = ['snailmail.confirm']
_description = 'Snailmail Confirm Invoice'
invoice_send_id = fields.Many2one('account.invoice.send')
def _confirm(self):
self.ensure_one()
self.invoice_send_id._print_action()
def _continue(self):
self.ensure_one()
return self.invoice_send_id.send_and_print()
| [
"36736117+SHIVJITH@users.noreply.github.com"
] | 36736117+SHIVJITH@users.noreply.github.com |
a6bfb716cbecd5d2dd88fcf202bdc75dfe03994d | 97f2c1375fec921a217194b3d95376b70a62109e | /swapcase.py | 4854e7dd6fe57d9c0c98529f959345b8171cebd2 | [] | no_license | Avani1992/database_pytest | a113522c768333fc4a60a65c0fe2ef7416fada10 | 91b1c71d7b04a95fd7f35a4230689098f155ec8b | refs/heads/master | 2021-05-21T01:56:58.662034 | 2020-06-24T11:54:52 | 2020-06-24T11:54:52 | 252,494,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | """You are given a string and your task is to swap cases. In other words, convert all lowercase letters to uppercase letters and vice versa"""
s='HackerRank.com presents "Pythonist 2".'
def swap_case(s):
s1=''
for i in range(0,len(s)):
if(s[i].islower()):
s1=s1+s[i].upper()
elif(s[i].isupper()):
s1=s1+s[i].lower()
else:
s1=s1+s[i]
return s1
result=swap_case(s)
print(result) | [
"noreply@github.com"
] | Avani1992.noreply@github.com |
ba42bc876837d7eb14bccec52f662ff49983e7c1 | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-integrations/connectors/source-instagram/setup.py | a15d5f7ab25e46f0a97117986cec69f1481bd81e | [
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 700 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from setuptools import find_packages, setup
MAIN_REQUIREMENTS = [
"airbyte-cdk",
"cached_property~=1.5",
"facebook_business~=11.0",
]
TEST_REQUIREMENTS = [
"requests-mock~=1.9.3",
"pytest~=6.1",
"pytest-mock~=3.6",
"requests_mock~=1.8",
]
setup(
name="source_instagram",
description="Source implementation for Instagram.",
author="Airbyte",
author_email="contact@airbyte.io",
packages=find_packages(),
install_requires=MAIN_REQUIREMENTS,
package_data={"": ["*.json", "schemas/*.json", "schemas/shared/*.json"]},
extras_require={
"tests": TEST_REQUIREMENTS,
},
)
| [
"noreply@github.com"
] | thomas-vl.noreply@github.com |
6a43351210c93cc49a59db2cfeecb3cca77fdecd | 32adb6b350bd0f0603cf4ec53c340d6b9a183007 | /posbench/httpconnect.py | eca6879204398779a5cf230e581f54bd866de78b | [] | no_license | HonryZhang/useful_tools | c197597a8a1395c9e8ee407f47fec28fbc6c8dab | c099c58b3263d3afd298bf7ce802675d71bbc9cd | refs/heads/master | 2023-06-17T20:10:19.758854 | 2021-07-15T07:29:37 | 2021-07-15T07:29:37 | 383,982,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,869 | py | # -*- encoding=utf8 -*-
"""
description: http协议
author: baorb
"""
import sys
import httplib
import logging
if sys.version < '2.7':
import myLib.myhttplib as httplib
try:
import ssl
except ImportError:
logging.warning('import ssl module error')
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
logging.warning('create unverified https context except')
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
class MyHTTPConnection:
def __init__(self, host, port=None, timeout=80):
self.timeout = timeout
self.connection = None
self.host = host.split(',')[0]
self.port = port
def create_connection(self):
self.connection = httplib.HTTPConnection('{}:{}'.format(self.host, self.port), timeout=self.timeout)
logging.debug('create connection to host: ' + self.host)
def close_connection(self):
if not self.connection:
return
try:
self.connection.close()
except Exception, data:
logging.error('Caught [%s], when close a connection' % data)
# 此处暂不抛异常
pass
finally:
self.connection = None
def connect_connection(self):
self.connection.connect()
def compare_version(v1, v2):
v1 = v1.split('.')
v2 = v2.split('.')
try:
for i in range(0, len(v1)):
if len(v2) < i + 1:
return 1
elif int(v1[i]) < int(v2[i]):
return -1
elif int(v1[i]) > int(v2[i]):
return 1
except:
return -1
if len(v2) > len(v1):
return -1
return 0 | [
"jshori@163.com"
] | jshori@163.com |
6a740713868a23cf7fd977a20d8d013a847f8250 | 17594c0dc66f37efc19568509bfbe0d440adfce1 | /boj.kr/bfs/12851.py | 2e9b7cde188a3318f538733484710fb029c0f45f | [] | no_license | jhs851/algorithm | c7cea7d3773871593dea0805bdb9f67a9e36d380 | 24995cdb0f1100590c9f5cc1df4a7eedd2bfe281 | refs/heads/main | 2023-07-26T03:09:13.005799 | 2021-09-09T07:18:29 | 2021-09-09T07:18:29 | 362,413,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | from collections import deque
n, k = map(int, input().split())
MAX = 200000
visited = [False] * MAX
visited[n] = True
count = [0] * MAX
count[n] = 1
d = [0] * MAX
queue = deque([n])
while queue:
x = queue.popleft()
for nx in [x - 1, x + 1, x * 2]:
if 0 <= nx < MAX:
if not visited[nx]:
visited[nx] = True
d[nx] = d[x] + 1
count[nx] = count[x]
queue.append(nx)
elif d[nx] == d[x] + 1:
count[nx] += count[x]
print(d[k])
print(count[k])
| [
"jhs851@naver.com"
] | jhs851@naver.com |
837aa3a130026c8b465498a806652ea33f0025a5 | 99d7a6448a15e7770e3b6f3859da043300097136 | /src/database/migrate/isotopedb/versions/007_Add_Experiment_tables.py | 338f96e9ca7cf990fb22b611afeea95af91f7e14 | [] | no_license | softtrainee/arlab | 125c5943f83b37bc7431ae985ac7b936e08a8fe4 | b691b6be8214dcb56921c55daed4d009b0b62027 | refs/heads/master | 2020-12-31T07:54:48.447800 | 2013-05-06T02:49:12 | 2013-05-06T02:49:12 | 53,566,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | from sqlalchemy import *
from migrate import *
meta = MetaData()
t1 = Table('ExperimentTable', meta,
Column('id', Integer, primary_key=True),
Column('name', String(40)),
)
t2 = Table('ExtractionTable', meta,
Column('id', Integer, primary_key=True),
Column('script_name', String(80)),
Column('script_blob', BLOB),
Column('heat_device_id', Integer),
Column('position', Integer),
Column('value', Float),
Column('heat_duration', Float),
Column('clean_up_duration', Float)
)
t3 = Table('MeasurementTable', meta,
Column('id', Integer, primary_key=True),
Column('script_name', String(80)),
Column('script_blob', BLOB),
Column('mass_spectrometer_id', Integer)
)
t4 = Table('MassSpectrometerTable', meta,
Column('id', Integer, primary_key=True),
Column('name', String(40))
)
t5 = Table('MolecularWeightTable', meta,
Column('id', Integer, primary_key=True),
Column('name', String(40)),
Column('mass', Float)
)
t6 = Table('IsotopeTable', meta,
Column('id', Integer, primary_key=True),
Column('analysis_id', Integer),
Column('detector_id', Integer),
Column('molecular_weight_id', Integer)
)
tables = [t1, t2, t3, t4, t5, t6]
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
meta.bind = migrate_engine
for t in tables:
t.create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta.bind = migrate_engine
for t in tables:
t.drop()
| [
"jirhiker@localhost"
] | jirhiker@localhost |
4b346e2c77f0c0aa4c1fea8b537ca84c24f7ed66 | e7af5a3e76e674be0a85628067fa494348d45123 | /Python-for-Finance-Second-Edition-master/Chapter09/c9_10_impact_of_correlation_2_stock.py | 31107dc2df965439f6e0c14d65ae1aa48973a9d9 | [
"MIT"
] | permissive | SeyedShobeiri/Work | 8321ead6f11de8297fa18d70a450602f700f26fb | f758e758106fbd53236a7fadae42e4ec6a4e8244 | refs/heads/master | 2022-07-25T02:33:25.852521 | 2020-05-17T16:11:27 | 2020-05-17T16:11:27 | 264,706,380 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
Name : c9_10_impact_of_correlation_2stocks.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : yany@canisius.edu
paulyxy@hotmail.com
"""
import scipy as sp
import matplotlib.pyplot as plt
sp.random.seed(123)
n=1000
sigma1=0.3
sigma2=0.20
n_step=20
for i in sp.arange(n_step):
print i
| [
"shobeiri@math.uh.edu"
] | shobeiri@math.uh.edu |
1082ad197f0a5adad1f88bc26c43e59264fcbe44 | b2dab4c71cd43ffffee8387ca4e3b0b6b0af1b26 | /scripts/floating_transform_publisher | d64141ddde443b20b0c6aff2b3594c79dc11d978 | [
"BSD-3-Clause"
] | permissive | rpiRobotics/floating_transform_publisher | fdca218bdb18ebf4ff5aad248d94e3a121314e62 | f4b9293b96b5f51dd5a5f917599643cf523f17b2 | refs/heads/master | 2020-03-22T14:45:16.173284 | 2018-08-01T19:19:06 | 2018-08-01T19:19:06 | 140,202,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,096 | #!/usr/bin/env python
# Copyright (c) 2017, Rensselaer Polytechnic Institute, Wason Technology LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Rensselaer Polytechnic Institute, or Wason
# Technology LLC, nor the names of its contributors may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
from tf2_msgs.msg import TFMessage
from geometry_msgs.msg import TransformStamped, Quaternion, Vector3
from urdf_parser_py.urdf import URDF
from tf.transformations import quaternion_from_euler
from floating_transform_publisher.srv import UpdateFloatingTransform, UpdateFloatingTransformResponse, \
ResetFloatingTransform, ResetFloatingTransformResponse
import threading
import copy
class FloatingTransformPublisher(object):
def __init__(self, robot_key):
self.urdf_robot = URDF.from_parameter_server(robot_key)
floating_joints=[]
for j in self.urdf_robot.joints:
if j.type == "floating":
floating_joints.append(j)
self.floating_joints=floating_joints
now=rospy.Time.now()
tfs=[]
for j in floating_joints:
tf=TransformStamped()
tf.header.frame_id=j.parent
tf.header.stamp=now
tf.child_frame_id=j.child
#TODO: fill in origin from URDF
tf.transform.translation=Vector3(*j.origin.xyz)
tf.transform.rotation=Quaternion(*quaternion_from_euler(*j.origin.rpy))
tfs.append(tf)
self.tfs=tfs
self.reset_tfs=copy.deepcopy(tfs)
self.tf_pub=rospy.Publisher("tf", TFMessage, queue_size=10)
self.update_srv=rospy.Service("update_floating_transform", UpdateFloatingTransform, self.update_floating_transform)
self.reset_srv=rospy.Service("reset_floating_transform", ResetFloatingTransform, self.reset_floating_transform)
self._lock=threading.Lock()
def publish(self):
with self._lock:
now=rospy.Time.now()
for tf in self.tfs:
tf.header.stamp=now
tf_message=TFMessage(self.tfs)
self.tf_pub.publish(tf_message)
def update_floating_transform(self, req):
with self._lock:
now=rospy.Time.now()
res=UpdateFloatingTransformResponse(True)
req_tfs=req.transforms
for req_tf in req_tfs:
if not any(e.child_frame_id == req_tf.child_frame_id for e in self.tfs):
rospy.logerr("Invalid child_frame %s", req_tf.child_frame_id)
res.success=False
return res
for req_tf in req_tfs:
dest_tf=[e for e in self.tfs if e.child_frame_id == req_tf.child_frame_id][0]
dest_tf.header.frame_id=req_tf.header.frame_id
dest_tf.header.stamp=now
dest_tf.child_frame_id=req_tf.child_frame_id
dest_tf.transform=req_tf.transform
return res
def reset_floating_transform(self, req):
with self._lock:
now=rospy.Time.now()
self.tfs=copy.deepcopy(self.reset_tfs)
for tf in self.tfs:
tf.header.stamp=now
return ResetFloatingTransformResponse(True)
def main():
rospy.init_node("floating_transform_publisher")
tf=FloatingTransformPublisher('robot_description')
rate=rospy.Rate(50)
while not rospy.is_shutdown():
rate.sleep()
tf.publish()
if __name__ == '__main__':
main()
| [
"wason@wasontech.com"
] | wason@wasontech.com | |
8a4c38ce99e162e12cbd9cdb98e3afeacb82b5d4 | 245ba2bf9aabba09ae4642a8e5baf26131b37834 | /algorithms/190819/연습문제3.py | 167e438d6def838712a49554066de33a30e51ca6 | [] | no_license | toohong5/TIL | 3c441db905bf53025f6f6e0942336bdc3a959297 | cb22fe0003405861c80203b02a396b7374db356b | refs/heads/master | 2023-01-13T12:56:09.260384 | 2019-11-27T08:31:01 | 2019-11-27T08:31:01 | 195,918,089 | 0 | 0 | null | 2023-01-07T18:11:17 | 2019-07-09T02:30:56 | Jupyter Notebook | UTF-8 | Python | false | false | 1,699 | py | import sys
sys.stdin = open('04 Stack1_DFS_input.txt', 'r')
# V, E = map(int, input().split()) # 정점수, 간선수
# G = [[] for _ in range(V + 1)] # 정점번호 : 1~V까지 사용
# for _ in range(E):
# u, v = map(int, input().split())
# G[u].append(v)
# G[v].append(u)
# for i in range(1, V + 1):
# print(i, '-->', G[i]) # 각 정점의 인접정점을 표현
# def DFS(v): # v는 시작점
# S = []
# visit = [False] * (V + 1)
# visit[v] = True # 시작점을 방문한다.
# print(v, end=' ')
# S.append(v) # 시작점을 스택에 push
# while S: # 빈 스택이 아닐 동안 반복
# # v의 방문하지 않은 인접정점을 찾는다. ==> w
# for w in G[v]:
# if not visit[w]: # 방문하지 않은 인접정점 찾기
# visit[w] = True # w를 방문하고
# print(w, end=' ') # 방문순서 출력
# S.append(v) # v를 스택에 push
# v = w # w를 현재 방문하는 정점으로 설정(다시 반복하기 위함)
# break # 빠져나옴
# else: # 이전에 방문한 정점으로 되돌아 간다.
# v = S.pop()
# print(DFS(1))
#----------------------------------------------
# 재귀호출
V, E = map(int, input().split()) # 정점수, 간선수
G = [[] for _ in range(V + 1)] # 정점번호 : 1~V까지 사용
visit = [False] * (V + 1)
for _ in range(E):
u, v = map(int, input().split())
G[u].append(v)
G[v].append(u)
def DFS(v):
visit[v] = True; print(v, end=' ') # 현재 방문하는 정점
for w in G[v]:
if not visit[w]:
DFS(w)
print(DFS(1))
| [
"toohong5@gmail.com"
] | toohong5@gmail.com |
9374f92882be4d8c685ff5986c2e6b3cf113131b | 48a7b266737b62da330170ca4fe4ac4bf1d8b663 | /molsysmt/physchem/get_surface_area.py | 4b4b49cebf41fe836d3e0d773e03eea86fb4e139 | [
"MIT"
] | permissive | uibcdf/MolSysMT | ddab5a89b8ec2377f383884c5169d147cab01322 | c3d713ba63db24eb8a2426115cf8d9cb3665d225 | refs/heads/main | 2023-08-08T15:04:16.217967 | 2023-08-04T05:49:56 | 2023-08-04T05:49:56 | 137,937,243 | 15 | 3 | MIT | 2023-06-04T20:27:06 | 2018-06-19T19:38:44 | Python | UTF-8 | Python | false | false | 711 | py | from molsysmt._private.digestion import digest
from molsysmt._private.exceptions import NotImplementedMethodError
import numpy as np
@digest()
def get_surface_area(molecular_system, selection='all', syntax='MolSysMT', definition='collantes'):
"""
To be written soon...
"""
from molsysmt.basic import get
if definition == 'collantes':
from .groups.surface_area import collantes as values
else:
raise NotImplementedMethodError
group_types = get(molecular_system, element='group', selection=selection, syntax=syntax, name=True)
output = []
for ii in group_types:
output.append(values[ii.upper()])
output = np.array(output)
return output
| [
"prada.gracia@gmail.com"
] | prada.gracia@gmail.com |
d87a980e970cfe648a7c3ed31263cbdeb8ac63f1 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r9/Gen/DecFiles/options/12103011.py | a900ba1efdfc69f94b53148dc0d2076a03ba6c1a | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/12103011.py generated: Fri, 27 Mar 2015 16:10:04
#
# Event Type: 12103011
#
# ASCII decay Descriptor: [B+ -> K+ K- K+]cc
#
from Configurables import Generation
Generation().EventType = 12103011
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_K+K-K+=phsp,NoCut.dec"
Generation().SignalRepeatedHadronization.CutTool = ""
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12103011
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
b7a6a233b4f0339a1ebc06963cccc30764639268 | ea05617b5d33a641bb60b735e936e8f0ba6e57a7 | /unittests/test_lib_stattext.py | 2b9e4c91a84a74c6add8c63e390d40262a79fe63 | [] | no_license | bbpatil/Phoenix | 18716744f5a3f5dbd805520baf3edc14ebde9529 | 4d05434a6c9e9effb2ade8085e2bfa83775575ed | refs/heads/master | 2022-02-23T21:40:34.510672 | 2016-06-12T05:26:06 | 2016-06-12T05:26:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | import unittest
import wtc
import wx
import wx.lib.stattext
#---------------------------------------------------------------------------
class lib_stattext_Tests(wtc.WidgetTestCase):
def test_lib_stattext1(self):
pnl = wx.Panel(self.frame)
w = wx.lib.stattext.GenStaticText(pnl, label="This is a test", pos=(10,10))
bs1 = w.GetEffectiveMinSize()
w.SetLabel("This is a New Label")
w.SetFont(wx.FFont(16, wx.FONTFAMILY_ROMAN))
bs2 = w.GetEffectiveMinSize()
self.assertEqual(w.GetLabel(), "This is a New Label")
self.assertEqual(w.Label, "This is a New Label")
self.assertTrue(bs2.height > bs1.height)
#---------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| [
"robin@alldunn.com"
] | robin@alldunn.com |
82aa33b02948617205755e82c678811a99b89edf | 1637440d91025bcbccee1703841362ac37ea5176 | /syncano_cli/parse_to_syncano/migrations/relation.py | 55c4b8fc867a2a2f3a332e6a7e16fb0c4eb680bb | [
"MIT"
] | permissive | dhruveshsheladiya/syncano-cli | 3c440b8861f1e88c08f7d91f52d891128d975ad3 | 5ca110cd3aef3ed51b2ad72c1b8a5a83efa3dce6 | refs/heads/master | 2020-03-24T01:16:26.502638 | 2016-09-27T19:18:05 | 2016-09-27T19:18:05 | 142,328,999 | 0 | 0 | null | 2018-07-25T16:54:33 | 2018-07-25T16:54:33 | null | UTF-8 | Python | false | false | 4,111 | py | # -*- coding: utf-8 -*-
import six
from syncano.models import Object
from syncano_cli.parse_to_syncano.config import PARSE_PAGINATION_LIMIT
from syncano_cli.parse_to_syncano.migrations.aggregation import data_aggregate
from syncano_cli.parse_to_syncano.migrations.mixins import PaginationMixin, ParseConnectionMixin
from syncano_cli.parse_to_syncano.processors.klass import ClassProcessor
class ClassRelationProcessor(ParseConnectionMixin, PaginationMixin):
def __init__(self, class_name, relations, config):
self.class_name = class_name
self.relations = relations
self.reference_map = data_aggregate.reference_map
self.config = config
def process_class(self, instance):
for relation in self.relations:
for field_name, relation_meta in six.iteritems(relation):
target_name = relation_meta['targetClass']
self._find_and_update_relations_objects(
field_name=field_name,
target_name=target_name,
instance=instance
)
def _find_and_update_relations_objects(self, field_name, target_name, instance):
# get the parse classes now;
for parse_class_name, objects_id_map in six.iteritems(self.reference_map):
if self.class_name == ClassProcessor.normalize_class_name(parse_class_name):
for parse_id, syncano_id in six.iteritems(objects_id_map):
self._find_relations_for_object(
parse_class_name=parse_class_name,
target_name=target_name,
parse_id=parse_id,
syncano_id=syncano_id,
field_name=field_name,
instance=instance
)
def _find_relations_for_object(self, parse_class_name, target_name, parse_id, syncano_id, field_name, instance):
limit, skip = self.get_limit_and_skip()
while True:
objects = self._find_parse_objects(parse_class_name, parse_id, field_name,
target_name, limit, skip)
if not objects['results']:
break
limit += PARSE_PAGINATION_LIMIT
skip += PARSE_PAGINATION_LIMIT
self._update_syncano_object(
field_name=field_name,
target_name=target_name,
objects_results=objects['results'],
syncano_id=syncano_id,
instance=instance
)
def _find_parse_objects(self, parse_class_name, parse_id, field_name, target_name, limit, skip):
query = {
"$relatedTo": {
"object": {
"__type": "Pointer",
"className": parse_class_name,
"objectId": parse_id},
"key": field_name}
}
return self.parse.get_class_objects(target_name, limit=limit, skip=skip, query=query)
def _update_syncano_object(self, field_name, target_name, objects_results, syncano_id, instance):
Object.please.update(
**{
field_name: {
"_add": [
self.reference_map[target_name][
data_object['objectId']
] for data_object in objects_results]
},
"class_name": self.class_name,
"id": syncano_id,
"instance_name": instance.name
}
)
class RelationProcessor(object):
def __init__(self, class_name, class_relations, *args, **kwargs):
super(RelationProcessor, self).__init__(*args, **kwargs)
self.class_relations = class_relations
self.class_name = class_name
def process(self, instance, config):
class_relation_processor = ClassRelationProcessor(
class_name=self.class_name,
relations=self.class_relations,
config=config
)
class_relation_processor.process_class(instance)
| [
"opalczynski@gmail.com"
] | opalczynski@gmail.com |
4b53d1aff74b9ca1c7a958b676fafc0af6f30f2e | 8e0a7d3f10a10942158cf3f95fd2e81e92d37843 | /test_twisted_async_response/concurrent_client.py | c0b76423fc79be5e9f2a53fcc5f32786ca933f33 | [] | no_license | xsren/python_test_demo | 1d528cb8ebe017d962a2b155638e233c094b1e63 | d320276bc9f6fb80860d8db7fcd3f500865176b3 | refs/heads/master | 2020-12-30T17:50:43.830322 | 2017-08-01T03:35:45 | 2017-08-01T03:35:45 | 82,634,652 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | # coding:utf8
import requests
import threading
def run():
print requests.get('http://127.0.0.1:1234/crawler')
def main():
t_list = []
for i in xrange(30):
t_list.append(threading.Thread(target=run, args=()))
for t in t_list:
t.start()
for t in t_list:
t.join()
if __name__ == '__main__':
main()
| [
"bestrenxs@gmail.com"
] | bestrenxs@gmail.com |
b311e6d7b6d1e81c08bb8cc84850ffd001b69295 | 2455062787d67535da8be051ac5e361a097cf66f | /Producers/BSUB/TrigProd_amumu_a5_dR5/trigger_amumu_producer_cfg_TrigProd_amumu_a5_dR5_372.py | 3fbb1b489f893e31fa92395685530a11c8ff0448 | [] | no_license | kmtos/BBA-RecoLevel | 6e153c08d5ef579a42800f6c11995ee55eb54846 | 367adaa745fbdb43e875e5ce837c613d288738ab | refs/heads/master | 2021-01-10T08:33:45.509687 | 2015-12-04T09:20:14 | 2015-12-04T09:20:14 | 43,355,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,360 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("PAT")
#process.load("BBA/Analyzer/bbaanalyzer_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load('Configuration.EventContent.EventContent_cff')
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff")
process.load("PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'MCRUN2_71_V1::All', '')
process.load("Configuration.StandardSequences.MagneticField_cff")
####################
# Message Logger
####################
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(100)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
## switch to uncheduled mode
process.options.allowUnscheduled = cms.untracked.bool(True)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(500)
)
####################
# Input File List
####################
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('root://eoscms//eos/cms/store/user/ktos/RECO_Step3_amumu_a5/RECO_Step3_amumu_a5_372.root'),
secondaryFileNames = cms.untracked.vstring()
)
############################################################
# Defining matching in DeltaR, sorting by best DeltaR
############################################################
process.mOniaTrigMatch = cms.EDProducer("PATTriggerMatcherDRLessByR",
src = cms.InputTag( 'slimmedMuons' ),
matched = cms.InputTag( 'patTrigger' ), # selections of trigger objects
matchedCuts = cms.string( 'type( "TriggerMuon" ) && path( "HLT_Mu16_TkMu0_dEta18_Onia*")' ), # input does not yet have the 'saveTags' parameter in HLT
maxDPtRel = cms.double( 0.5 ), # no effect here
maxDeltaR = cms.double( 0.3 ), #### selection of matches
maxDeltaEta = cms.double( 0.2 ), # no effect here
resolveAmbiguities = cms.bool( True ),# definition of matcher output
resolveByMatchQuality = cms.bool( True )# definition of matcher output
)
# talk to output module
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string("file:RECO_Step3_amumu_a5_TrigProd_372.root"),
outputCommands = process.MINIAODSIMEventContent.outputCommands
)
process.out.outputCommands += [ 'drop *_*_*_*',
'keep *_*slimmed*_*_*',
'keep *_pfTausEI_*_*',
'keep *_hpsPFTauProducer_*_*',
'keep *_hltTriggerSummaryAOD_*_*',
'keep *_TriggerResults_*_HLT',
'keep *_patTrigger*_*_*',
'keep *_prunedGenParticles_*_*',
'keep *_mOniaTrigMatch_*_*'
]
################################################################################
# Running the matching and setting the the trigger on
################################################################################
from PhysicsTools.PatAlgos.tools.trigTools import *
switchOnTrigger( process ) # This is optional and can be omitted.
switchOnTriggerMatching( process, triggerMatchers = [ 'mOniaTrigMatch'
])
process.outpath = cms.EndPath(process.out)
| [
"kmtos@ucdavis.edu"
] | kmtos@ucdavis.edu |
82dac60d0ffe68054b0a7335cbf480b129ea8419 | 165a9d0db328e96d14ac7c3073205819768959e2 | /CDS_71_rand5_of_rand7.py | 4f2fde50f5a26969c74f7a83091584b94c28fade | [
"MIT"
] | permissive | celelstine/codingProblemSolutions | 12cef3396be96509bb37505fa357391537a8ad72 | 52e567aaa4a6e40c11bc131c2fdfffccfa7408b1 | refs/heads/master | 2021-06-05T21:15:29.772534 | 2020-02-23T18:19:29 | 2020-02-23T18:19:29 | 146,922,897 | 0 | 0 | MIT | 2018-10-01T05:32:59 | 2018-08-31T17:28:03 | JavaScript | UTF-8 | Python | false | false | 445 | py | import random
def rand7():
    """Return a uniformly distributed integer in [1, 7]."""
    return random.randint(1,7)
def rand5():
    """Return a uniformly distributed integer in [1, 5].

    Implemented with rejection sampling on rand7(): draw from 1..7 and
    retry on 6 or 7, so each of 1..5 is returned with probability 1/5.
    The original formula ``(rand7() * 5) / 7`` was not uniform and, under
    true division, did not even produce integers.
    """
    problemDescription = '''
    *problemDescription* \n
    This problem was asked by Two Sigma.
    Using a function rand7() that returns an integer from 1 to 7 (inclusive)
    with uniform probability, implement a function rand5() that returns an
    integer from 1 to 5 (inclusive).
    '''
    print(problemDescription)
    # Rejection sampling: a uniform draw restricted to {1..5} stays uniform.
    while True:
        draw = rand7()
        if draw <= 5:
            return draw
print(rand5())
| [
"okorocelestine@gmail.com"
] | okorocelestine@gmail.com |
ffda15de7089820556396da2ce8e685c3b9e6e91 | 62b75c03509dcd993a28eba2bb7004ae5f427f73 | /astropy/nddata/tests/test_flag_collection.py | 7ed4af6b0dcebb37473a2aebb7e4562660adce6a | [] | permissive | xiaomi1122/astropy | 08aba5592d9bb54e725708352e34db89af2ec289 | 8876e902f5efa02a3fc27d82fe15c16001d4df5e | refs/heads/master | 2020-04-09T12:27:36.768462 | 2018-12-06T01:11:23 | 2018-12-06T01:11:23 | 160,299,140 | 0 | 0 | BSD-3-Clause | 2018-12-04T04:52:22 | 2018-12-04T04:52:22 | null | UTF-8 | Python | false | false | 1,460 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from ...tests.helper import pytest
from .. import FlagCollection
def test_init():
    # Constructing with an explicit shape tuple should succeed.
    FlagCollection(shape=(1, 2, 3))
def test_init_noshape():
    # Omitting the required `shape` argument must raise with a clear message.
    with pytest.raises(Exception) as exc:
        FlagCollection()
    assert exc.value.args[0] == 'FlagCollection should be initialized with the shape of the data'
def test_init_notiterable():
    # A scalar (non-iterable) shape is rejected.
    with pytest.raises(Exception) as exc:
        FlagCollection(shape=1.)
    assert exc.value.args[0] == 'FlagCollection shape should be an iterable object'
def test_setitem():
    # Arrays of any dtype are accepted as flags, provided the shape matches.
    f = FlagCollection(shape=(1, 2, 3))
    f['a'] = np.ones((1, 2, 3)).astype(float)
    f['b'] = np.ones((1, 2, 3)).astype(int)
    f['c'] = np.ones((1, 2, 3)).astype(bool)
    f['d'] = np.ones((1, 2, 3)).astype(str)
@pytest.mark.parametrize(('value'), [1, 1., 'spam', [1, 2, 3], (1., 2., 3.)])
def test_setitem_invalid_type(value):
    # Anything that is not a numpy array (scalars, strings, lists, tuples)
    # is rejected with the same message.
    f = FlagCollection(shape=(1, 2, 3))
    with pytest.raises(Exception) as exc:
        f['a'] = value
    assert exc.value.args[0] == 'flags should be given as a Numpy array'
def test_setitem_invalid_shape():
    # A numpy array whose shape differs from the collection's is rejected.
    f = FlagCollection(shape=(1, 2, 3))
    with pytest.raises(Exception) as exc:
        f['a'] = np.ones((3, 2, 1))
    assert exc.value.args[0] == 'flags array shape (3, 2, 1) does not match data shape (1, 2, 3)'
| [
"thomas.robitaille@gmail.com"
] | thomas.robitaille@gmail.com |
72a5ea65e3d05e179ebf71c80191e2fe5820c5fa | bda892fd07e3879df21dcd1775c86269587e7e07 | /leetcode/0307_M_区域和检索 - 数组可修改_线段树.py | 612a279505a8bc32f9fedd8413e5da5368b70c77 | [] | no_license | CrzRabbit/Python | 46923109b6e516820dd90f880f6603f1cc71ba11 | 055ace9f0ca4fb09326da77ae39e33173b3bde15 | refs/heads/master | 2021-12-23T15:44:46.539503 | 2021-09-23T09:32:42 | 2021-09-23T09:32:42 | 119,370,525 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,030 | py | '''
给你一个数组 nums ,请你完成两类查询,其中一类查询要求更新数组下标对应的值,另一类查询要求返回数组中某个范围内元素的总和。
实现 NumArray 类:
NumArray(int[] nums) 用整数数组 nums 初始化对象
void update(int index, int val) 将 nums[index] 的值更新为 val
int sumRange(int left, int right) 返回子数组 nums[left, right] 的总和(即,nums[left] + nums[left + 1], ..., nums[right])
示例:
输入:
["NumArray", "sumRange", "update", "sumRange"]
[[[1, 3, 5]], [0, 2], [1, 2], [0, 2]]
输出:
[null, 9, null, 8]
解释:
NumArray numArray = new NumArray([1, 3, 5]);
numArray.sumRange(0, 2); // 返回 9 ,sum([1,3,5]) = 9
numArray.update(1, 2); // nums = [1,2,5]
numArray.sumRange(0, 2); // 返回 8 ,sum([1,2,5]) = 8
提示:
1 <= nums.length <= 3 * 104
-100 <= nums[i] <= 100
0 <= index < nums.length
-100 <= val <= 100
0 <= left <= right < nums.length
最多调用 3 * 104 次 update 和 sumRange 方法
'''
from typing import List
from leetcode.tools.tree import showTree, buildTree
class NumArray:
    """Mutable range-sum structure backed by an iterative segment tree.

    The tree is stored in a flat array of size 2 * n:
      * leaves (the original values) live at indices [n, 2n - 1];
      * internal node i holds the sum of its children 2i and 2i + 1;
      * index 0 is unused padding.
    update() and sumRange() run in O(log n); construction is O(n).
    Assumes the initial list is non-empty, as the problem guarantees
    (1 <= nums.length).
    """

    def __init__(self, nums: List[int]):
        self.len = len(nums)
        # Flat storage of length 2n: internal nodes in [1, n), leaves in
        # [n, 2n).
        self.tree = [0 for _ in range(2 * self.len)]
        # Leaves hold the original values.
        self.tree[self.len:] = nums
        # Build parents bottom-up: node i sums children 2i and 2i + 1.
        for i in range(self.len - 1, 0, -1):
            self.tree[i] = self.tree[i * 2] + self.tree[i * 2 + 1]

    def update(self, index: int, val: int) -> None:
        """Set nums[index] = val and refresh all ancestor sums."""
        index = index + self.len
        self.tree[index] = val
        # Walk up to the root (node 1), recomputing each parent from its
        # sibling pair.  Stopping while index > 1 avoids the original's
        # extra iteration that wrote into the unused slot 0.
        while index > 1:
            left = index
            right = index
            if left % 2 == 0:
                right += 1
            else:
                left -= 1
            index = left // 2
            self.tree[index] = self.tree[left] + self.tree[right]

    def sumRange(self, left: int, right: int) -> int:
        """Return sum(nums[left..right]), both bounds inclusive."""
        left += self.len
        right += self.len
        total = 0
        while left <= right:
            # A left pointer that is a right child is counted on its own,
            # then excluded by moving the pointer one step right.
            if left % 2 == 1:
                total += self.tree[left]
                left += 1
            # Symmetrically, a right pointer that is a left child.
            if right % 2 == 0:
                total += self.tree[right]
                right -= 1
            # Ascend one level.
            left //= 2
            right //= 2
        return total
numArray = NumArray([1, 3, 5])
print(numArray.sumRange(0, 2), numArray.update(1, 2), numArray.sumRange(0, 2))
showTree(buildTree([i for i in range(1, 10)]))
'''
1
2 3
4 5 6 7
8 9 . . . . . .
'''
| [
"1016864609@qq.com"
] | 1016864609@qq.com |
6f28896faeb0e9fe47f882df32f74ded478ad885 | 7ae0f100b49763f79b276260bbc0e87bd904da3e | /src/app/__init__.py | 5b98b0a760bd15666f075779f5753aab40f11762 | [] | no_license | wondersell/wildsearch-indexer | d88a5b3bce17acc1cb61d365f55ab5d9f63f61ae | 67d5f29f6d405c055cfa211ddf0b70521382a671 | refs/heads/master | 2023-07-19T00:33:34.371231 | 2020-12-31T11:20:00 | 2020-12-31T11:20:00 | 285,488,583 | 2 | 0 | null | 2021-07-19T06:26:44 | 2020-08-06T06:09:51 | Python | UTF-8 | Python | false | false | 174 | py | # This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import celery # noqa: ABS101
__all__ = ['celery']
| [
"artem.kiselev@gmail.com"
] | artem.kiselev@gmail.com |
e5725a8aae710a9d7a9701ea844e456eb498767a | 7d9de1ac9a70220f4cf1738c4ae25507ad85ca04 | /pytorch/range_test.py | 5c3290f374acb1d7c0f06f413e08b1a25af22704 | [] | no_license | seasa2016/code_test | e977804cc1c8a6e07d2ed99b0835fb93b09f7dd1 | 8ac8c3aec96c69c98a4ce2789fbfede28491a6fc | refs/heads/master | 2020-03-25T13:48:19.409894 | 2018-11-16T11:22:46 | 2018-11-16T11:22:46 | 143,843,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | import torch
import torch.nn as nn
a = torch.range(0,5,requires_grad=True)
loss = torch.sum(a) - 5
loss.backward()
print(a.grad) | [
"ericet1234@gmail.com"
] | ericet1234@gmail.com |
bed747c3c3a0581ffe2d77383b8e2571d2637536 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /PORMain/pirates/piratesgui/ShipItemList.py | 14fdb75275497d82285f3ce5d3b4f6967e823517 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,439 | py | # File: S (Python 2.4)
from direct.gui.DirectGui import *
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesgui import InventoryItemList
from pirates.piratesgui import ShipItemGUI
from pirates.piratesbase import PiratesGlobals
class ShipItemList(InventoryItemList.InventoryItemList):
    """Scrollable GUI list of ship items (Python 2 / Panda3D DirectGui).

    Each inventory entry is rendered as one ShipItemGUI panel, stacked
    vertically on the list's scrolled canvas.
    """
    def __init__(self, inventory, height, trade = 0, buy = 0, sell = 0, use = 0):
        # trade/buy/sell/use flags are stored by the base class and later
        # forwarded to every item panel created in addPanel().
        InventoryItemList.InventoryItemList.__init__(self, inventory, height, trade, buy, sell, use)
        self.initialiseoptions(ShipItemList)
    def loadInventoryPanels(self):
        # Create one panel per inventory item without laying out each time
        # (repack = 0), then perform a single layout pass at the end.
        for item in self.inventory:
            # data = [item, quantity]; quantity is always 1 here.
            data = [
                item,
                1]
            self.addPanel(data, repack = 0)
        self.repackPanels()
    def addPanel(self, data, repack = 1):
        # Build the panel, attach it to the scrolled canvas and register it.
        panel = ShipItemGUI.ShipItemGUI(data, trade = self.trade, buy = self.buy, sell = self.sell, use = self.use)
        panel.reparentTo(self.getCanvas())
        self.panels.append(panel)
        if repack:
            self.repackPanels()
    def repackPanels(self):
        # Stack panels top-down, one slot of height z per panel, and size
        # the scrollable canvas to end at the last panel.
        invHeight = len(self.inventory)
        # NOTE(review): invHeight is unused — kept as-is.
        z = 0.01 + PiratesGuiGlobals.ShipItemGuiHeight
        # Fallback value so the canvasSize line below works with zero panels.
        i = 0
        for i in xrange(len(self.panels)):
            self.panels[i].setPos(0.01, 0, -z * (i + 1))
            self.panels[i].origionalPos = self.panels[i].getPos(render2d)
        self['canvasSize'] = (0, PiratesGuiGlobals.ShipItemGuiWidth - 0.089, -z * (i + 1), 0)
| [
"brandoncarden12345@gmail.com"
] | brandoncarden12345@gmail.com |
5eb503c79c974710bfb8cd6bb81664d03409cc35 | a0cbae33d175fdf0299eddc775a1b4b84c0addcf | /orquesta/tests/unit/utils/test_specs.py | 1fe005ff1a6149426c489f2b0933ba00ce4dc4d8 | [
"Apache-2.0"
] | permissive | batk0/orquesta | 240ff95c76c610c52518ee7d2e3eee11b6594a73 | f03f3f2f3820bf111a9277f4f6c5d6c83a89d004 | refs/heads/master | 2020-04-17T10:48:48.016607 | 2019-01-19T15:40:05 | 2019-01-19T15:40:05 | 166,514,957 | 0 | 0 | Apache-2.0 | 2019-01-19T06:37:39 | 2019-01-19T06:37:39 | null | UTF-8 | Python | false | false | 3,302 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from orquesta.specs import loader
from orquesta.tests.unit import base
from orquesta.utils import specs
class SpecsUtilTest(base.WorkflowSpecTest):
    """Tests for pmr2/orquesta spec instantiation and (de)serialization
    helpers in orquesta.utils.specs."""
    def setUp(self):
        super(SpecsUtilTest, self).setUp()
        # Resolve the spec module under test once for all test methods.
        self.spec_module = loader.get_spec_module(self.spec_module_name)
    def test_convert_wf_def_dict_to_spec(self):
        # A workflow definition given as a dict converts to a WorkflowSpec
        # whose name and spec mirror the input.
        wf_name = 'basic'
        wf_def = self.get_wf_def(wf_name)
        self.assertIsInstance(wf_def, dict)
        wf_spec = specs.instantiate(self.spec_module_name, wf_def)
        self.assertIsInstance(wf_spec, self.spec_module.WorkflowSpec)
        self.assertEqual(wf_name, wf_spec.name)
        self.assertDictEqual(wf_def[wf_name], wf_spec.spec)
    def test_convert_wf_def_yaml_to_spec(self):
        # Same conversion, but starting from the raw YAML string form.
        wf_name = 'basic'
        wf_def = self.get_wf_def(wf_name, raw=True)
        self.assertIsInstance(wf_def, str)
        wf_spec = specs.instantiate(self.spec_module_name, wf_def)
        self.assertIsInstance(wf_spec, self.spec_module.WorkflowSpec)
        self.assertEqual(wf_name, wf_spec.name)
        self.assertDictEqual(yaml.safe_load(wf_def)[wf_name], wf_spec.spec)
    def test_bad_wf_def_none(self):
        # None is not a valid workflow definition.
        self.assertRaises(
            ValueError,
            specs.instantiate,
            self.spec_module_name,
            None
        )
    def test_bad_wf_def_empty(self):
        # An empty dict is rejected as well.
        self.assertRaises(
            ValueError,
            specs.instantiate,
            self.spec_module_name,
            dict()
        )
    def test_bad_wf_def_not_yaml(self):
        # A plain string that does not parse into a mapping is rejected.
        self.assertRaises(
            ValueError,
            specs.instantiate,
            self.spec_module_name,
            'foobar'
        )
    def test_bad_wf_def_without_version(self):
        # The 'version' key is mandatory in the definition.
        wf_name = 'basic'
        wf_def = self.get_wf_def(wf_name)
        wf_def.pop('version')
        self.assertIsNone(wf_def.get('version'))
        self.assertRaises(
            ValueError,
            specs.instantiate,
            self.spec_module_name,
            wf_def
        )
    def test_bad_wf_def_unsupported_version(self):
        # An unknown spec version is rejected.
        wf_name = 'basic'
        wf_def = self.get_wf_def(wf_name)
        wf_def['version'] = 99.0
        self.assertRaises(
            ValueError,
            specs.instantiate,
            self.spec_module_name,
            wf_def
        )
    def test_deserialize(self):
        # A spec survives a serialize/deserialize round trip intact.
        wf_name = 'basic'
        wf_def = self.get_wf_def(wf_name)
        wf_spec_1 = specs.instantiate(self.spec_module_name, wf_def)
        wf_spec_2 = specs.deserialize(wf_spec_1.serialize())
        self.assertIsInstance(wf_spec_2, self.spec_module.WorkflowSpec)
        self.assertEqual(wf_name, wf_spec_2.name)
        self.assertDictEqual(wf_def[wf_name], wf_spec_2.spec)
| [
"m4d.coder@gmail.com"
] | m4d.coder@gmail.com |
946fec7bf11af536f56d0e05551393f774b9cab7 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_interiors.py | c9fcc546f81fabbad76438e54f363fd59fe36da0 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#calss header
class _INTERIORS():
def __init__(self,):
self.name = "INTERIORS"
self.definitions = interior
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['interior']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4332107881083ad19cbe16942b26916ed9a854ca | e837db39c9609830ab8e77dac2077ea30cadc5b3 | /flachdaecher_app/admin.py | 6e3bb183ee3a1257841989b66c3583d79a1ad334 | [] | no_license | windundschnee/accountneu | 9c8ff1507f725a5179604be2640d76b5302a0299 | da9066840a312a95bc628556c94738010787a01f | refs/heads/master | 2022-12-10T06:00:42.449898 | 2019-10-25T18:29:23 | 2019-10-25T18:29:23 | 211,513,631 | 0 | 0 | null | 2022-12-08T05:22:15 | 2019-09-28T14:34:00 | Python | UTF-8 | Python | false | false | 133 | py | from django.contrib import admin
from .models import FlachdachModel
# Register your models here.
admin.site.register(FlachdachModel)
| [
"du@example.com"
] | du@example.com |
789d8aa33c6611b381c958efe2909d5a1a4e8aa4 | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/edabit/_Edabit-Solutions-master/Find the Smallest and Biggest Numbers/solution.py | b9e343dc2a0a5a983bdba5cf2f792491328f77c8 | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 315 | py | def min_max(nums):
nums.sort()
return [nums[0],nums[-1]]
def test():
    """Smoke-test min_max against two fixed inputs, printing a tag on any
    mismatch."""
    print("test has started")
    cases = (
        ([14, 35, 6, 1, 34, 54], [1, 54], "error1"),
        ([1.346, 1.6532, 1.8734, 1.8723], [1.346, 1.8734], "error2"),
    )
    for values, expected, failure_tag in cases:
        if min_max(values) != expected:
            print(failure_tag)
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
8ff2d9f8ee57060f9e9421080728a6c5e92faa98 | e3abbb5790aac835330b0c2fe59eacc6765ae3cc | /tests/test_disconnects.py | 89b70530832b842d8df631c32c8724c4928950e6 | [
"MIT"
] | permissive | frankrousseau/mrq | 3d9433c809735666208edc532451dbba87f3d40c | 3c9c34f0df1de067c713be6f2d38eff8f923e03b | refs/heads/master | 2021-01-18T05:59:34.727873 | 2015-02-04T14:30:03 | 2015-02-04T14:30:03 | 30,272,780 | 0 | 0 | null | 2015-02-04T00:36:14 | 2015-02-04T00:36:13 | null | UTF-8 | Python | false | false | 892 | py | import time
from mrq.job import Job
import pytest
@pytest.mark.parametrize(["p_service"], [["mongodb"], ["redis"]])
def test_disconnects_service_during_task(worker, p_service):
    """Restart the backing service (MongoDB or Redis) while a job is
    running and verify the queued task still completes correctly.
    """
    worker.start()
    # Pick the fixture for the service under test.
    if p_service == "mongodb":
        service = worker.fixture_mongodb
    elif p_service == "redis":
        service = worker.fixture_redis
    service_pid = service.process.pid
    # Enqueue a task that sleeps long enough (5s) to span the restart.
    job_id1 = worker.send_task("tests.tasks.general.Add", {
        "a": 41, "b": 1, "sleep": 5}, block=False, queue="default")
    time.sleep(2)
    # Bounce the service mid-job.
    service.stop()
    service.start()
    service_pid2 = service.process.pid
    # Make sure we did restart
    assert service_pid != service_pid2
    time.sleep(5)
    # Result should be there without issues
    assert Job(job_id1).fetch().data["result"] == 42
| [
"sylvain@sylvainzimmer.com"
] | sylvain@sylvainzimmer.com |
a738b72a044dabd2ea0da78eb4fb5e20d79f0280 | 19236d9e966cf5bafbe5479d613a175211e1dd37 | /cohesity_management_sdk/models/hyper_flex_storae_snapshot.py | be7bd6f758fcf271342c9f9ffb251ae480b2b9b7 | [
"MIT"
] | permissive | hemanshu-cohesity/management-sdk-python | 236c44fbd9604809027f8ddd0ae6c36e4e727615 | 07c5adee58810979780679065250d82b4b2cdaab | refs/heads/master | 2020-04-29T23:22:08.909550 | 2019-04-10T02:42:16 | 2019-04-10T02:42:16 | 176,474,523 | 0 | 0 | NOASSERTION | 2019-03-19T09:27:14 | 2019-03-19T09:27:12 | null | UTF-8 | Python | false | false | 2,226 | py | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class HyperFlexStoraeSnapshot(object):
    """Implementation of the 'HyperFlex Storae Snapshot.' model.

    Specifies a Storage Snapshot Provider in a HyperFlex environment.

    Attributes:
        name (string): unique name of the Protection Source.
        product_version (string): product version of the protection source.
        mtype (Type5Enum): type of managed Object in a HyperFlex protection
            source, e.g. 'kServer' (a HyperFlex server entity).
        uuid (string): uuid of the protection source.
    """

    # Maps Model property names to the API's JSON property names.
    _names = {
        "name": 'name',
        "product_version": 'productVersion',
        "mtype": 'type',
        "uuid": 'uuid'
    }

    def __init__(self, name=None, product_version=None, mtype=None, uuid=None):
        """Store the four optional snapshot fields on the instance."""
        self.name = name
        self.product_version = product_version
        self.mtype = mtype
        self.uuid = uuid

    @classmethod
    def from_dictionary(cls, dictionary):
        """Create an instance of this model from a dictionary.

        Args:
            dictionary (dict): deserialized server response whose keys
                match the API property names, or None.

        Returns:
            HyperFlexStoraeSnapshot: a populated model, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        # Pull each API-named field, defaulting to None when absent.
        return cls(dictionary.get('name'),
                   dictionary.get('productVersion'),
                   dictionary.get('type'),
                   dictionary.get('uuid'))
| [
"ashish@cohesity.com"
] | ashish@cohesity.com |
0ba165016674eb6222d4c715824556f5bce78ae5 | b3cb41c81069ad2e447a7bab98fd269235996a51 | /pyprop/base_classes.py | 7a2ee646915c15f7198c3e7beb8fc07645d42ef2 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | usuaero/PyProp | e289e8bd64d2d0db51547a808a1f019b37b14fc4 | e568dda610632adf1ab208a6861cca8d8dd84e75 | refs/heads/master | 2023-06-03T01:35:08.525608 | 2021-06-21T16:45:51 | 2021-06-21T16:45:51 | 280,196,572 | 15 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,029 | py | """Defines base classes for the module."""
import os
import sqlite3 as sql
import numpy as np
from .exceptions import DatabaseRecordNotFoundError
class DatabaseComponent:
    """A component defined in the database. DEPRECIATED (sic)

    Looks up component records (batteries, ESCs, motors, props) in the
    bundled SQLite database ``components.db``.
    """

    def __init__(self):
        # The database is expected to live next to this module.
        self._db_file = os.path.join(os.path.dirname(__file__), "components.db")

        # Maps the public component-type keyword to its database table.
        self._table_names = {
            "battery" : "Batteries",
            "ESC" : "ESCs",
            "motor" : "Motors",
            "prop" : "Props"
        }

    def get_database_record(self, component_type, **kwargs):
        """Extract a single record from the database.

        Parameters
        ----------
        component_type : str
            One of "battery", "ESC", "motor", or "prop".
        name, manufacturer, dbid : optional
            Mutually exclusive row selectors; at most one may be given.
        capacity, I_max, Kv, diameter, pitch : optional
            Component-specific nearest-match sort keys (capacity for
            batteries, I_max for ESCs, Kv for motors, diameter/pitch for
            props).

        Returns
        -------
        numpy.ndarray
            The matched row; ties (and unfiltered queries) are broken
            randomly.

        Raises
        ------
        ValueError
            If more than one of name/manufacturer/dbid is supplied.
        DatabaseRecordNotFoundError
            If no record matches the query.
        """
        # Get kwargs
        name = kwargs.get("name", None)
        manufacturer = kwargs.get("manufacturer", None)
        dbid = kwargs.get("dbid", None)
        capacity = kwargs.get("capacity", None)
        I_max = kwargs.get("I_max", None)
        Kv = kwargs.get("Kv", None)
        diameter = kwargs.get("diameter", None)
        pitch = kwargs.get("pitch", None)

        with sql.connect(self._db_file) as conn:
            db_cur = conn.cursor()

            # Build the query with "?" placeholders; the original
            # interpolated user-supplied values directly into the SQL
            # string, which was injection-prone.  The table name comes
            # from the internal mapping, so formatting it is safe.
            table_name = self._table_names[component_type]
            command = "select * from {0}".format(table_name)
            params = []

            if name is not None:
                if manufacturer is not None or dbid is not None:
                    raise ValueError("Too many {0} parameters specified.".format(component_type))
                command += " where Name = ?"
                params.append(name)
            elif manufacturer is not None:
                if dbid is not None:
                    raise ValueError("Too many {0} parameters specified.".format(component_type))
                command += " where manufacturer = ?"
                params.append(manufacturer)
            elif dbid is not None:
                command += " where id = ?"
                params.append(dbid)

            # Collect component-specific nearest-match sort terms.  The
            # original appended a second "order by" clause for these,
            # producing invalid SQL; all terms now share one ORDER BY.
            order_terms = []
            if component_type == "battery" and capacity is not None:
                order_terms.append("abs(? - Capacity)")
                params.append(capacity)
            if component_type == "ESC" and I_max is not None:
                # NOTE(review): column name "I_motorax" kept from the
                # original; it may be a typo for "I_max" — confirm against
                # the database schema.
                order_terms.append("abs(? - I_motorax)")
                params.append(I_max)
            if component_type == "motor" and Kv is not None:
                order_terms.append("abs(? - kv)")
                params.append(Kv)
            if component_type == "prop" and diameter is not None:
                order_terms.append("abs(? - Diameter)")
                params.append(diameter)
            if component_type == "prop" and pitch is not None:
                order_terms.append("abs(? - Pitch)")
                params.append(pitch)

            # A trailing RANDOM() randomizes the pick among equally-good
            # rows (and gives a fully random pick when no sort key is set).
            order_terms.append("RANDOM()")
            command += " order by " + ", ".join(order_terms) + " limit 1"

            # Get record
            db_cur.execute(command, params)
            try:
                record = np.asarray(db_cur.fetchall())[0]
            except IndexError:
                raise DatabaseRecordNotFoundError(command)

            return record
"cory.goates@aggiemail.usu.edu"
] | cory.goates@aggiemail.usu.edu |
ac44ea363414b315f23f7fbc15c32734924d4a71 | a635b8d51016220a6d84808def431c27dde41b90 | /libcms/apps/contact_form/apps.py | 6b76faf1767e1218cad4697c9243b233da6b320b | [] | no_license | isergey/chel | aab3ac98ae2a10258f7a5afce88c74f9e13a2d7f | d1a38bfe7ebba80d9c39ae3b0d54ebfd2965046c | refs/heads/master | 2023-07-07T02:13:41.363452 | 2023-06-26T10:25:14 | 2023-06-26T10:25:14 | 3,816,204 | 1 | 0 | null | 2023-03-31T14:52:31 | 2012-03-24T09:33:53 | JavaScript | UTF-8 | Python | false | false | 151 | py | from django.apps import AppConfig
class ContactFormConfig(AppConfig):
name = 'contact_form'
verbose_name = 'Контактная форма'
| [
"dostovalov@gmail.com"
] | dostovalov@gmail.com |
861b6bc3e0be629951d00a347532e405e062548d | f68732bc40a7a90c3a1082e4b3a4154518acafbb | /script/dbus/systemBus/power/003_refreshBatteries.py | adf2cc74cbaca7a47c8533a5cd766f34c53db75c | [] | no_license | lizhouquan1017/dbus_demo | 94238a2307e44dabde9f4a4dd0cf8ec217260867 | af8442845e722b258a095e9a1afec9dddfb175bf | refs/heads/master | 2023-02-11T19:46:27.884936 | 2021-01-08T05:27:18 | 2021-01-08T05:27:18 | 327,162,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | # -*- coding: utf-8 -*-
# ****************************************************
# @Test Case ID: 003_refreshBatteries
# @Test Description: 刷新所有电池设备的状态
# @Test Condition:
# @Test Step: 1.刷新所有电池设备的状态
# @Test Result: 1.检查刷新成功;
# @Test Remark:
# @Author: ut000511
# *****************************************************
import pytest
from frame.base import OSBase
from aw.dbus.systemBus import power
class TestCase(OSBase):
    """DBus system-bus test: refresh the state of all battery devices."""
    def setUp(self):
        # Precondition step label (Chinese: "Precondition 1: none").
        self.Step("预制条件1:无")
    @pytest.mark.public
    def test_step(self):
        # Step label (Chinese: "Step 1: refresh the state of all battery
        # devices and check the refresh succeeds").
        self.Step("步骤1:刷新所有电池设备的状态并检查刷新成功")
        power.refreshBatteries()
    def tearDown(self):
        # No cleanup required.
        pass
| [
"lizhouquan@uniontech.com"
] | lizhouquan@uniontech.com |
3e93a618cae2c252cafc5fc47b87b17fe445a0ba | 9b862cc2ca6cc29a6efe4e783165bc51a98c7790 | /pmr2/layer/tests/utility.py | 27627c6d5e1e489890c10628a3f0be94be34068a | [] | no_license | PMR2/pmr2.layer | 5b836f76bc9da676a9fab5c8137ebd788ea77c8e | 394f5b99e1a74169ccddb08aa0b5bf5bc3de513d | refs/heads/master | 2020-05-07T16:23:50.692864 | 2015-07-27T05:42:14 | 2015-07-27T05:42:14 | 6,539,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | import zope.interface
from pmr2.layer.utility import ConditionalLayerApplierBase
class IExampleTestLayer(zope.interface.Interface):
"""Example."""
class ITestLayer1(zope.interface.Interface):
pass
class ITestLayer2(zope.interface.Interface):
pass
class ITestLayer3(ITestLayer1):
pass
class TestLayerApplier(ConditionalLayerApplierBase):
layer = IExampleTestLayer
def condition(self, request):
return 'application/vnd.example.com-v1' in request['HTTP_ACCEPT']
class MultiTestLayerApplier(ConditionalLayerApplierBase):
layer = [ITestLayer1, ITestLayer2, ITestLayer3]
def condition(self, request):
return 'application/vnd.example.com.tests' in request['HTTP_ACCEPT']
| [
"tommy.yu@auckland.ac.nz"
] | tommy.yu@auckland.ac.nz |
049b6e36c7996d5ca9afedd1bdd9f3df9cf8f786 | 418bb4401b66d2edd6195a3a1b16177a1c341f35 | /paras.py | b0598444d917836404277ca111a72b8bc693fa35 | [] | no_license | victorsoda/TrafficAnalysis | 1cadc55cd82fe1a936af619fad3269d6466ca078 | ce61d8c60aad972ea8ed9c255e50317879a62dba | refs/heads/master | 2021-04-03T09:39:44.882831 | 2019-03-07T08:07:56 | 2019-03-07T08:07:56 | 124,986,491 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | import pprint
import numpy as np
data_path = './data/'
volume_file = data_path + 'volume.csv'
travel_time_file = data_path + 'travel_time.csv'
road_id_travel_time_file = data_path + 'road_id_travel_time.csv'
ssid2_road_id_file = data_path + 'ssidA&B_road_id.csv'
lane_direction_file = data_path + 'lane_direction.csv'
origin_data_file = data_path + 'origin_data.csv' # 生成的初始数据文件
door_data_file = data_path + 'door_data.csv' # 使用源代码开门视频的create_examples输出的data,验证pursuit的正确性
example_data_file = data_path + 'example_data.txt' # create_examples生成的文件
result_recorder_file = data_path + 'result_recorder.txt'
TIME_SLICE = 5 # 宣城数据集的时间分片为5分钟
INF = 1e10
pp = pprint.PrettyPrinter()
# print(origin_data_file[:-4])
# a = np.zeros((8, 2))
# print(a)
# row = np.array([0, 0, 1, 1, 0])
# print(list(row) * 4)
# # ind = [2, 3]
# a = {2, 3}
# [x for x in ] += 1
# print(row)
# action_index = np.where(np.array(row) == 1)
# action_index = [x + 1 for x in action_index[0]]
# print(action_index)
# t_val = np.array([[11, 12, 13, 14, 15, 16, 17],
# [21, 22, 23, 24, 25, 26, 27],
# [31, 32, 33, 34, 35, 36, 37]])
# print(t_val[:, 0:3])
# print(t_val[:, 3:])
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
63691433bc972e320295650c2478b9ec64b6db2c | 3b60e6f4bbc011003ac4929f01eb7409918deb79 | /Analysis_v1/Simulation/Pythia/Unparticles/testsplit/testfrag/STest1p5Unp3000p0_M_2000.py | ed61b1033bbad9c751fb25d90d1d0e764933c73e | [] | no_license | uzzielperez/Analyses | d1a64a4e8730325c94e2bc8461544837be8a179d | 1d66fa94763d7847011ea551ee872936c4c401be | refs/heads/master | 2023-02-09T04:54:01.854209 | 2020-09-07T14:57:54 | 2020-09-07T14:57:54 | 120,850,137 | 0 | 0 | null | 2020-06-17T16:48:16 | 2018-02-09T03:14:04 | C++ | UTF-8 | Python | false | false | 1,255 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ExtraDimensionsUnpart:ffbar2gammagamma = on',
'ExtraDimensionsUnpart:gg2gammagamma = on',
'ExtraDimensionsUnpart:LambdaU = 3000.0',
'ExtraDimensionsUnpart:lambda = 1.0',
'ExtraDimensionsUnpart:dU = 1.5',
#'ExtraDimensionsUnpart:spinU = 2',
'PhaseSpace:pTHatMin = 70.0',
'PhaseSpace:mHatMin = 2000',
'PhaseSpace:mHatMax = 1',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"uzzie.perez@cern.ch"
] | uzzie.perez@cern.ch |
9245ad3b8c46ec1710475f237f307cd6276b4300 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D96A/PRODEXD96AUN.py | 24832a41a8fb141359e2e8b5b0172a9e2afcaabf | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 968 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD96AUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 2},
{ID: 'MEA', MIN: 1, MAX: 1},
{ID: 'NAD', MIN: 1, MAX: 2},
{ID: 'RFF', MIN: 1, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'IMD', MIN: 1, MAX: 99, LEVEL: [
{ID: 'QTY', MIN: 0, MAX: 10},
{ID: 'LIN', MIN: 0, MAX: 9999, LEVEL: [
{ID: 'GIS', MIN: 0, MAX: 2},
{ID: 'LOC', MIN: 0, MAX: 2},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'MEA', MIN: 0, MAX: 5},
{ID: 'QTY', MIN: 0, MAX: 5},
{ID: 'TDT', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
]},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
88ebbbd5cb8459262007d071855eee9e264076d6 | 382f4acfd565be4aedb07c694e8daa489ff3e70a | /eveutil2/bin/update-markethistory | 1e2c4e1a73d91087594563d88006e87373b3a338 | [] | no_license | electusmatari/legacy | 913c5a9f68074d1fa793b0e3ff76fd3f9c3f481e | 9266e955398a0c8279b82b8347a85d9186a455da | refs/heads/master | 2021-01-22T05:24:13.935562 | 2013-09-29T11:44:19 | 2013-09-29T11:44:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,813 | #!/usr/bin/env python
# CREATE TABLE market_history (
# id SERIAL NOT NULL,
# day DATE NOT NULL,
# regionid BIGINT NOT NULL,
# typeid BIGINT NOT NULL,
# orders BIGINT NOT NULL,
# movement BIGINT NOT NULL,
# max DOUBLE PRECISION NOT NULL,
# avg DOUBLE PRECISION NOT NULL,
# min DOUBLE PRECISION NOT NULL,
# UNIQUE (day, regionid, typeid)
# );
import evelib.newdb as evedb
import emcom.gmi
import datetime
import urllib
import bz2
import StringIO
import csv
import sdb
REGIONS = ['Heimatar', 'Metropolis', 'Molden Heath', 'The Forge']
HISTORIC_URL = "http://export.eve-metrics.com/historic/%s.csv.bz2"
def main():
    """Backfill the market_history table from eve-metrics historic dumps.

    For each region in REGIONS: download the full historic CSV (bz2
    compressed), skip day/type rows already stored within roughly the last
    year, and insert the rest.  Duplicate inserts racing with another writer
    are absorbed via the table's UNIQUE (day, regionid, typeid) constraint.
    Python 2 code (print statement, long()).
    """
    conn = evedb.connect()
    c = conn.cursor()
    # Map region names to CCP region ids.
    c.execute("SELECT regionname, regionid FROM ccp.mapregions")
    regionids = dict(c.fetchall())
    # Only consider roughly the last year (367 days) of history.
    start = datetime.datetime.utcnow() - datetime.timedelta(days=367)
    for region in REGIONS:
        regionid = regionids[region]
        c.execute("SELECT day, typeid FROM market_history "
                  "WHERE regionid = %s "
                  "  AND day > %s",
                  (regionid, start))
        # known_days: typeid -> set of already-stored days as YYYYMMDD longs.
        known_days = {}
        for day, typeid in c.fetchall():
            known_days.setdefault(typeid, set())
            known_days[typeid].add(long(day.strftime("%Y%m%d")))
        url = urllib.urlopen(HISTORIC_URL % (regionid,))
        # The dump is bz2-compressed CSV; decompress fully in memory.
        rows = csv.reader(StringIO.StringIO(bz2.decompress(url.read())))
        for row in rows:
            if row[0] == 'type_id':
                # Header line of the CSV.
                continue
            if len(row) != 7:
                print "Bad row: %r" % (row,)
                raise RuntimeError()
            (type_id, orders, movement, max, avg, min, date) = row
            # NOTE(review): max/avg/min shadow builtins; kept as-is.
            type_id = int(type_id)
            orders = long(orders)
            movement = long(movement)
            max = float(max)
            avg = float(avg)
            min = float(min)
            date = datetime.datetime.strptime(date, "%Y-%m-%d")
            if date < start:
                continue
            datenum = long(date.strftime("%Y%m%d"))
            if datenum not in known_days.get(type_id, []):
                try:
                    c.execute("INSERT INTO market_history (day, regionid, "
                              "                            typeid, orders, movement, "
                              "                            max, avg, min) "
                              "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)",
                              (date, regionid, type_id, orders, movement,
                               max, avg, min))
                    conn.commit()
                except sdb.IntegrityError:
                    # Row already present (unique constraint) -- skip it.
                    conn.rollback()
    # for typerow in emcom.gmi.TYPE_DATA:
    #     if typerow[1] is not None:
    #         continue
    #     for historyrow in evemetrics.history([typerow[0]], regions=REGIONS):
    #         insert(c, historyrow)
    conn.commit()
def isknown(c, day, regionid, typeid):
    """Return whether market_history already holds a row for this key.

    `c` is an open DB-API cursor; the result is the boolean produced by the
    SQL EXISTS expression.
    """
    query = ("SELECT EXISTS (SELECT * FROM market_history "
             "WHERE day = %s "
             "  AND regionid = %s "
             "  AND typeid = %s)")
    c.execute(query, (day, regionid, typeid))
    (exists,) = c.fetchone()
    return exists
import bz2
class BZReader(object):
    """Iterate over the lines of a bz2-compressed byte stream.

    Decompresses incrementally in `bufsize` chunks so arbitrarily large files
    never have to fit in memory.  Yields each line including its trailing
    newline; a final unterminated line is yielded with a newline appended.

    Fix: the original mixed str literals with the bytes returned by
    BZ2Decompressor.decompress(), which crashes on Python 3 ("" + b"...").
    Using b"" literals is equivalent on Python 2 and correct on Python 3.
    """

    def __init__(self, bzstream, bufsize=1024*1024):
        self.bzstream = bzstream
        self.bufsize = bufsize

    def __iter__(self):
        buf = b""
        decompress = bz2.BZ2Decompressor()
        while True:
            bzdata = self.bzstream.read(self.bufsize)
            if bzdata == b"":
                # End of stream: flush any unterminated trailing line.
                if buf != b"":
                    yield buf + b"\n"
                return
            buf += decompress.decompress(bzdata)
            while b"\n" in buf:
                i = buf.index(b"\n")
                line = buf[:i+1]
                buf = buf[i+1:]
                yield line
# Entry point: run the full backfill when invoked as a script.
if __name__ == '__main__':
    main()
| [
"forcer@forcix.cx"
] | forcer@forcix.cx | |
45837862fda78545ccc443790fea864b972febcf | ffb627b58f0553fc8bf86c0d100db1dde2015cfe | /week 1/day5/9093.py | c1d01d603408c6811fa77bb581e0ac4e16eb00b5 | [] | no_license | DentiQ/CodeTest | a208bb1250e18fca9d336b93a5c2e4807c621866 | a8d21447ad2cefc583b45c437623647abde11d95 | refs/heads/master | 2023-06-04T06:12:33.540950 | 2021-06-30T17:00:24 | 2021-06-30T17:00:24 | 363,316,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | n = int(input())
# For each of the n test cases: read one line, reverse every word in place,
# and print the reversed words separated by single spaces.
for _case in range(n):
    words = input().split()
    reversed_words = [w[::-1] for w in words]
    print(*reversed_words)
| [
"dentiq0414@gmail.com"
] | dentiq0414@gmail.com |
9aa280d18399f9265f1b7aca41ffadaf74450035 | 781e2692049e87a4256320c76e82a19be257a05d | /assignments/python/wc/src/1378.py | 0e37e33b27dc06733c4d4980b3cd091d677d39e5 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 216 | py | def word_count(phrase):
words = phrase.split()
result = {}
for word in words:
if word in result:
result[word] += 1
else:
result[word] = 1
return result
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
3af9e10c47b578144db49dc4a3c0c93d4a424b09 | e49a07ad215172e9c82cb418b10371bf0ce1c0f7 | /第2章 python核心编程/第1节.python核心编程/02-python高级-2/2-装饰器/04-多个装饰器.py | 12d352b3d7c3d9596c627310543bec4da181c946 | [] | no_license | taogangshow/python_Code | 829c25a7e32ead388c8b3ffa763cb9cf587bfd7b | 4b3d6992ec407d6069f3187ca7e402a14d863fff | refs/heads/master | 2022-12-16T01:26:17.569230 | 2018-11-16T10:07:59 | 2018-11-16T10:07:59 | 157,832,985 | 0 | 1 | null | 2022-11-25T09:55:32 | 2018-11-16T08:00:13 | Python | UTF-8 | Python | false | false | 404 | py | #定义函数:完成包裹数据
def makeBold(fn):
    """Decorator: wrap whatever fn() returns in HTML <b> ... </b> tags."""
    def bold_wrapper():
        return "<b>" + fn() + "</b>"
    return bold_wrapper
#定义函数:完成包裹数据
def makeItalic(fn):
    """Decorator: wrap whatever fn() returns in HTML <i> ... </i> tags."""
    def italic_wrapper():
        return "<i>" + fn() + "</i>"
    return italic_wrapper
# Decorators apply bottom-up: test3 = makeBold(makeItalic(test3)), so the
# <i> tags end up inside the <b> tags.
@makeBold  # applied second (outermost): test3 = makeBold(test3)
@makeItalic  # applied first (innermost): test3 = makeItalic(test3)
def test3():
    return "hello world-3"
# Prints "<b><i>hello world-3</i></b>".
ret = test3()
print(ret)
| [
"cdtaogang@163.com"
] | cdtaogang@163.com |
3605648a739014c7dc4d6313d88cac67210e7236 | 1395576291c1e8b34981dbcbfcd0fdda020083b8 | /dist_cts/dist_fleet_2.0/dist_collective_get_world_size.py | be03b9b1e048b699ca046c8e42e74f422e9b1be2 | [] | no_license | gentelyang/scripts | a8eb8a3cc5cc5bac753f1bb12033afaf89f03404 | e3562ab40b574f06bba68df6895a055fa31a085d | refs/heads/master | 2023-06-06T12:38:37.002332 | 2021-06-15T05:09:06 | 2021-06-15T05:09:06 | 262,957,519 | 0 | 4 | null | 2021-01-10T08:28:11 | 2020-05-11T06:28:08 | Python | UTF-8 | Python | false | false | 925 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
#======================================================================
#
# Copyright (c) 2017 Baidu.com, Inc. All Rights Reserved
#
#======================================================================
"""
/***************************************************************************
*
* Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
* @file test.py
* @author liyang109@baidu.com
* @date 2020-12-30 15:53
* @brief
*
**************************************************************************/
"""
import sys
import paddle.distributed as dist
from utils import run_priority
@run_priority(level='P0')
def test_get_world_size():
    """Check that the collective world size is the expected 2 trainers."""
    assert dist.get_world_size() == 2
    # Report the passing test by its own function name.
    print("{} ... ok".format(sys._getframe().f_code.co_name))


if __name__ == '__main__':
    test_get_world_size()
"liyang109@baidu.com"
] | liyang109@baidu.com |
2d25c71d6e148cc3e4205d67c004c263518568d0 | 2eb6b96565367f2f843c3352cce03fcd7b575757 | /module_sellenium/_test_json.py | 218234e36510aeda14db74969f4e18f0f51ddf6d | [] | no_license | onitonitonito/web_beautifulsoup_scrapping | a712fc7fd58d28f89e5c1ddcbf3faac74552e263 | d07c6eebbf2731754b72b7d13f90be9d0082e776 | refs/heads/master | 2022-12-10T14:58:27.045305 | 2020-06-09T22:21:48 | 2020-07-15T22:22:50 | 148,488,834 | 1 | 1 | null | 2022-12-08T09:39:14 | 2018-09-12T13:56:23 | Python | UTF-8 | Python | false | false | 2,561 | py | """
# Reading and Writing JSON to a File in Python
# By Scott Robinson • August 17, 2016 • 5 Comments
# https://stackabuse.com/reading-and-writing-json-to-a-file-in-python/
"""
# print(__doc__)
import os

# The current working directory differs depending on whether the script is
# launched from an IDE "run" button or from a terminal (cmd); the dirname of
# __file__ is the reliable anchor for files living next to this script.
print(os.getcwd())                          # cwd of the process
print(os.path.abspath(os.path.curdir))      # same as getcwd()
print(os.path.dirname(__file__))            # directory of this script

import json

# Read/write everything under <this script's dir>/statics.
dir_current = os.path.dirname(__file__)
dir_read_write = f'{dir_current}/statics'
filename_with_dir = f'{dir_read_write}/_test_json.json'

# Build the sample payload: a list of people records.
data = {}
data['people'] = []
data['people'].append({
    'name': 'Scott',
    'website': 'stackabuse.com',
    'from': 'Nebraska'
})
data['people'].append({
    'name': 'Larry',
    'website': 'google.com',
    'from': 'Michigan'
})
data['people'].append({
    'name': 'Tim',
    'website': 'apple.com',
    'from': 'Alabama'
})

# Serialize to disk, pretty-printed and with non-ASCII preserved.
with open(file=filename_with_dir, mode='w', encoding='utf8') as file:
    json.dump(obj=data, fp=file, sort_keys=True, indent=2, ensure_ascii=False)
"""
{
"people": [
{
"from": "Nebraska",
"name": "Scott",
"website": "stackabuse.com"
},{
"from": "Michigan",
"name": "Larry",
"website": "google.com"
},{
"from": "Alabama",
"name": "Tim",
"website": "apple.com"
}
]
}
"""
# Read the file back and pretty-print each person record (the list
# comprehension is used only for its print side effects).
with open(file=filename_with_dir, mode='r', encoding='utf8') as file:
    _test_json = json.load(fp=file)

for i, people in enumerate(_test_json['people'],1):
    [print(echo)
        for echo in [
            f"\n\n"
            f"-------------------------",
            f"  ({i}) PEOPLE INFORMATION",
            f"-------------------------",
            f"* Name     : {people['name']}",
            f"* From     : {people['from']}",
            f"* Web site : {people['website']}",
            f"-------------------------",
        ]
    ]
"""
-------------------------
(1) PEOPLE INFORMATION
-------------------------
* Name : Scott
* From : Nebraska
* Web site : stackabuse.com
-------------------------
-------------------------
(2) PEOPLE INFORMATION
-------------------------
* Name : Larry
* From : Michigan
* Web site : google.com
-------------------------
-------------------------
(3) PEOPLE INFORMATION
-------------------------
* Name : Tim
* From : Alabama
* Web site : apple.com
-------------------------
"""
| [
"nitt0x0@gmail.com"
] | nitt0x0@gmail.com |
51541cfc0efce6e3743209630d5aae189c992744 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Kivy/kivy/kivy/tests/test_filechooser_unicode.py | f90c297541a79baedf8ea10dc76f7b6cc75794d2 | [
"MIT"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:29d8bc5fa93406bc313e889d4aeb384ef0b92ca711d0d2dcacbfa64e34adbbca
size 3716
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
2adc1b4411370663446ddd23ae426129db13c18d | 133d0f5c7122829ec1e5a5dacdd96a3fa8aa4fc2 | /tests/typecheck.py | 0830182b40042742b608397268f7f0e3201f1eff | [
"Apache-2.0"
] | permissive | izgzhen/cozy | e4b44bb6939c6892919225bba91520913ba55a75 | fc57fdccdd52c5ecf4c4ae4e8b80af97e8119b77 | refs/heads/master | 2020-03-29T01:01:11.270275 | 2018-09-27T16:39:51 | 2018-09-27T16:39:51 | 149,367,207 | 0 | 0 | Apache-2.0 | 2018-09-19T00:12:02 | 2018-09-19T00:12:02 | null | UTF-8 | Python | false | false | 2,790 | py | import unittest
from cozy.syntax_tools import mk_lambda
from cozy.target_syntax import *
from cozy.structures.heaps import *
from cozy.typecheck import typecheck, retypecheck
class TestTypechecking(unittest.TestCase):
    """Unit tests for cozy's typechecker on hand-built AST expressions."""

    def test_map_over_noncollection(self):
        # Mapping over a scalar (TInt) must be rejected.
        x = EVar("x").with_type(TInt())
        e = EMap(x, mk_lambda(TInt(), lambda elem: EBool(True)))
        errs = typecheck(e, { x.id : x.type })
        assert errs

    def test_filter_over_noncollection(self):
        # Filtering a scalar (TInt) must be rejected.
        x = EVar("x").with_type(TInt())
        e = EFilter(x, mk_lambda(TInt(), lambda elem: EBool(True)))
        errs = typecheck(e, { x.id : x.type })
        assert errs

    def test_flatmap(self):
        # Comparing a bag (the flat-map result) with a number is ill-typed.
        e = EBinOp(EFlatMap(EBinOp(EVar('ys').with_type(TBag(THandle('ys', TInt()))), '+', EEmptyList().with_type(TBag(THandle('ys', TInt())))).with_type(TBag(THandle('ys', TInt()))), ELambda(EVar('_var12').with_type(THandle('ys', TInt())), EUnaryOp('sum', ESingleton(ENum(1).with_type(TInt())).with_type(TBag(TInt()))).with_type(TInt()))).with_type(TBag(TInt())), '==', ENum(0).with_type(TInt())).with_type(TBool())
        assert not retypecheck(e)

    def test_sum(self):
        # Summing a bag of booleans is ill-typed.
        xs = EVar("xs").with_type(TBag(TBool()))
        e = EUnaryOp("sum", xs)
        assert not retypecheck(e)

    def test_ECond_1(self):
        # Both branches the same type: well-typed.
        x = ENum(1).with_type(INT)
        assert retypecheck(ECond(EBool(True), x, x))

    def test_ECond_2(self):
        # Branch types disagree (int vs bool): ill-typed.
        x = ENum(1).with_type(INT)
        y = EBool(False)
        assert not retypecheck(ECond(EBool(True), x, y))

    def test_ECond_3(self):
        # Same mismatch with the branches swapped: still ill-typed.
        x = ENum(1).with_type(INT)
        y = EBool(False)
        assert not retypecheck(ECond(EBool(True), y, x))

    def test_ECond_4(self):
        # The condition itself must be boolean, not an int.
        x = ENum(1).with_type(INT)
        assert not retypecheck(ECond(x, x, x))

    def test_lambda_arg_inference(self):
        # The lambda parameter's type should be inferred from the collection.
        s = ESingleton(ETRUE)
        x = EVar("x")
        assert retypecheck(EFilter(s, ELambda(x, x)))
        assert retypecheck(EMap(s, ELambda(x, x)))
        assert retypecheck(EMakeMap2(s, ELambda(x, x)))

    def test_heaps(self):
        # Large heap expression (EHeapPeek2/EMakeMinHeap) should typecheck
        # given the environment below.
        e = ECond(EBinOp(EBinOp(EMapGet(EStateVar(EMakeMap2(EVar('xs'), ELambda(EVar('_var39381'), EUnaryOp('len', EFilter(EVar('xs'), ELambda(EVar('_var39382'), EBinOp(EVar('_var39381'), '==', EVar('_var39382')))))))), ENum(0).with_type(INT)), '==', ENum(1).with_type(INT)), 'and', EBinOp(ENum(0).with_type(INT), '==', EStateVar(EArgMin(EVar('xs'), ELambda(EVar('_var21501'), EVar('_var21501')))))), EHeapPeek2(EStateVar(EMakeMinHeap(EVar('xs'), ELambda(EVar('_var21501'), EVar('_var21501')))), EStateVar(EUnaryOp('len', EVar('xs')))), EStateVar(EArgMin(EVar('xs'), ELambda(EVar('_var21501'), EVar('_var21501')))))
        assert retypecheck(e, env={
            "xs": INT_BAG,
            "_var21501": INT})
| [
"loncaric@cs.washington.edu"
] | loncaric@cs.washington.edu |
fd2b0f889ccef3fce7487a8374f5de0e950177a6 | 561464f786e855668663a6928f123beaf05b1f1f | /wsgi.py | 51888168228f9dbeb1e14074af7757da37a66770 | [] | no_license | Coderpool/Coderpool-Registration-App | 0ee2ebc0e5c14d1af98f13d19e9604c1afaa7878 | adf29303cf113650544ea86fa5b17e52d89e99fc | refs/heads/master | 2021-01-15T13:29:30.045896 | 2013-12-06T01:55:25 | 2013-12-06T01:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | import os
import sys
# Directory containing this wsgi.py; used as the project import root.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
project_home = BASE_DIR
# Make the project importable before Django loads its settings module.
if project_home not in sys.path:
    sys.path.append(project_home)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
# Import must happen after sys.path/environ are set up (legacy Django API).
import django.core.handlers.wsgi
# WSGI callable picked up by the application server.
application = django.core.handlers.wsgi.WSGIHandler()
| [
"rajeevs1992@gmail.com"
] | rajeevs1992@gmail.com |
05d70d9526d627b8fe1b541ceec8e5df4e321c8a | 5dc7dcf5938a60f1dc23cb0bf7578f2ae9ca283a | /main.py | d26fe77abc6cb68504c30ed0aeb4cedd063bbbc9 | [] | no_license | afcarl/Visual-Ballistic-Roulette-Vision | 41cbb83b6115c8d7cf81a1c05054a1a02da82de7 | d4209b92f529256b956fbae78ebaac856cd61e04 | refs/heads/master | 2020-03-22T01:10:09.763037 | 2017-02-10T06:38:04 | 2017-02-10T06:38:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | import os
from ball_tracking_from_gradients import start_ball_analysis
from utils import results_dir
from wheel_green_tracking_from_frames import start_wheel_analysis
def list_to_str(s):
    """Format a sequence of numbers as a comma-separated, newline-terminated
    line with two decimals each, e.g. [1, 2.5] -> "1.00, 2.50\n".

    Fix: dropped the redundant str() wrapper (join already returns str) and
    the needless intermediate list inside join (a generator suffices).
    """
    return ', '.join('{0:.2f}'.format(b) for b in s) + '\n'
if __name__ == '__main__':
    print('Python script has started. Please wait.')
    # Run both analyses; each returns a sequence of timing values.
    balls = start_ball_analysis()
    wheels = start_wheel_analysis()
    print('\n -- \n')
    print('BALL = {}'.format(balls))
    print('WHEEL = {}'.format(wheels))
    # Persist the two result lines (ball then wheel) under the results dir.
    results_filename = os.path.join(results_dir(), 'results.txt')
    with open(results_filename, 'wt', encoding='utf-8') as f:
        f.write(list_to_str(balls))
        f.write(list_to_str(wheels))
| [
"premy@reactive.co.jp"
] | premy@reactive.co.jp |
158ccde3af8d872aa64f1f1b97bb0a698fd8a377 | c4fa1ebcdd413c4ab3f0979ee3beead8a8809870 | /providers/edu/pdxscholar/migrations/0001_initial.py | 795f466f367dfb4f418cfb10dda1a00b2ca7243e | [] | no_license | terroni/SHARE | e47f291db7cf100d29a7904fe820e75d29db1472 | a5631f441da1288722c68785b86128c854cbe7c1 | refs/heads/develop | 2020-12-03T02:29:47.381341 | 2016-07-11T19:40:27 | 2016-07-11T19:40:27 | 63,097,148 | 1 | 0 | null | 2016-07-11T19:45:51 | 2016-07-11T19:45:50 | null | UTF-8 | Python | false | false | 667 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 15:45
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
    """Initial migration for the edu.pdxscholar harvester: registers the
    robot user, its OAuth token, and its harvest schedule via data
    migrations (no reverse operations provided)."""

    dependencies = [
        ('share', '0001_initial'),
        ('djcelery', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(
            code=share.robot.RobotUserMigration('edu.pdxscholar'),
        ),
        migrations.RunPython(
            code=share.robot.RobotOauthTokenMigration('edu.pdxscholar'),
        ),
        migrations.RunPython(
            code=share.robot.RobotScheduleMigration('edu.pdxscholar'),
        ),
    ]
| [
"cwisecarver@cos.io"
] | cwisecarver@cos.io |
008b8ae4c04e14d9cdce23fbd160c2d6fada69a3 | e2e1732b6eb1a7a6dfeba76762851ad06eb8e482 | /wangban/wangban/spiders/beifen/2/ningbo/daxiekaifaqu.py | 1c2493c906df0eb3ec37744b776a18dbfa62a0cb | [] | no_license | nightqiuhua/bigCrawlers | 551e80d55df492c89ae0e0e0bd70c0e5f873068d | 19b86130c8af057d06014865d150e3d2ed6cc319 | refs/heads/main | 2023-03-23T01:13:26.021850 | 2021-03-03T15:09:28 | 2021-03-03T15:09:28 | 344,165,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | # -*- coding: utf-8 -*-
import os
from datetime import datetime
import os
from urllib.parse import urlparse
import re
import time
from wangban_utils.Json2Xpath import Json2XPath,XPath
from wangban_utils.single_mode import singleton
from urllib.parse import urljoin
from scrapy.utils.project import get_project_settings
from spiders.basemodel import DIYBaseSpider
SETTINGS = get_project_settings()
# Per-site XPath configuration lives in a JSON file named after the spider.
JSONFILE = os.path.join(SETTINGS['BASE_JSONFILE_PATH'],'daxiekaifaqu.json')


@singleton
class DaXieKaiFaQuSpider(DIYBaseSpider):
    """Scrapy spider for the Daxie development-zone public-resource trading
    site.  XPath expressions are loaded from JSONFILE; each extractor falls
    back to a safe default and logs the failing URL on error."""
    name = 'daxiekaifaqu'
    start_urls = ['http://daxie.bidding.gov.cn/']
    source_website = '宁波市大榭开发区公共资源交易网'
    specific_area = '大榭开发区'
    source_url = 'http://daxie.bidding.gov.cn/'
    links_tree = {}
    loss_urls = {}
    column_urls_pool = []

    def __init__(self,jsonfile = JSONFILE):
        super().__init__()
        # self.xp exposes the configured XPath strings as attributes.
        self.xp = Json2XPath(jsonfile).get_xpath()
        # Suffix template for paginated listing URLs ("index_2", "index_3", ...).
        self.post_suf = 'index_{}'

    def get_totalpage(self,response):
        """Extract the total page count from a listing page; default 1."""
        try:
            total_page = response.xpath(self.xp.total_page).extract()[0]
            #print(response.xpath(self.xp.total_page).extract())
            # Page count appears as ".../<N>页" in the pagination text.
            total_page = re.findall(r'/(\d+)页',total_page)[0]
        except Exception as e:
            print('get_totalpage error_reason',e)
            print('url',response.url)
            total_page = 1
        #print('total_page is ',total_page)
        total_page = self.set_totalpage(total_page)
        return total_page

    def get_elem_url(self,element,response=None):
        """Extract an announcement URL from a list element, absolutized
        against source_url; falls back to source_url on failure."""
        an_url = self.source_url
        try:
            elem_url = element.xpath(self.xp.an_url).extract()[0]
            an_url = urljoin(self.source_url,elem_url)
        except Exception as e:
            print('get_elem_url error',e)
            print('url',response.url)
            an_url = self.source_url
        #print(an_url)
        return an_url

    def get_on_date(self,element,response=None):
        """Extract the publication date (YYYY-MM-DD-ish) or 'NONE'."""
        try:
            on_date = element.xpath(self.xp.on_date).extract()[0]
            on_date = re.findall(r'(\d+-\d+-\d+)',on_date)[0]
        except Exception as e:
            on_date = 'NONE'
            print('get on date error',e)
            print('url',response.url)
        #print(on_date)
        return on_date

    def get_an_title(self,element,response=None):
        """Extract the announcement title or 'NONE' on failure."""
        an_title = 'NONE'
        try:
            an_title = element.xpath(self.xp.an_title).extract()[0]
        except Exception as e:
            print('get_an_title error',e)
            print('url',response.url)
        #print(an_title)
        return an_title

    def cre_page_url(self,f_p_url,page):
        """Build the URL of page N from the first-page URL by replacing
        'index' with 'index_N' (page 0 is treated as page 1)."""
        if page == 0:
            page = 1
        page_url = f_p_url.replace('index',self.post_suf.format(page))
        #print('page_url',page_url)
        return page_url
"1320551630@qq.com"
] | 1320551630@qq.com |
8bc54841f877590a71e85e5c7203a64d7bac48f9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03959/s162228731.py | 956fa1077360361e08a0ffa20848604515bb78c8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | def work(n, t, a):
    # NOTE(review): Python 2 code (xrange/raw_input).  t appears to be the
    # running prefix maxima and a the running suffix maxima of a hidden
    # sequence (competitive-programming problem); the answer counts the
    # sequences consistent with both, mod 1e9+7.  TODO confirm against the
    # original problem statement.
    # The overall maximum must agree: last prefix max == first suffix max.
    if t[-1] != a[0]:
        return 0
    # middle: first index where prefix max meets suffix max (peak position).
    middle = 0
    for i in xrange(n):
        if t[i] == a[i]:
            if t[i] != t[-1] or t[i] != a[0]:
                return 0
            middle = i
            break
    # Right of the peak, the prefix max may never drop below the suffix max.
    for i in xrange(middle, n):
        if t[i] < a[i]:
            return 0
    # record[i]: number of valid choices for position i (None = unset).
    record = [None] * n
    left, right = 0, n - 1
    # Left segment: positions strictly before the peak.
    while left <= right and t[left] < a[left]:
        if not left or t[left] > t[left-1]:
            # Prefix max increased here: the value is forced (1 choice).
            record[left] = 1
        else:
            # Any value in 1..t[left] keeps the prefix max unchanged.
            record[left] = t[left]
        left += 1
    # Peak position itself is forced.
    record[left] = 1
    # Right segment: symmetric reasoning with the suffix maxima.
    while left < right and a[right] <= t[right]:
        if right == n - 1 or a[right] > a[right+1]:
            record[right] = 1
        else:
            record[right] = a[right]
        right -= 1
    # Multiply the per-position choice counts modulo 1e9+7.
    ans = 1
    for i in xrange(n):
        ans = (ans*record[i]) % (10**9+7)
    return ans


# Read n, the prefix-max sequence t and the suffix-max sequence a from stdin.
n = int(raw_input())
t = map(int, raw_input().split())
a = map(int, raw_input().split())
print work(n, t, a)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
44c2bb2dde1375f2c6043f1a16a82e63308efb08 | 5b95b83ba7e18cb40babab37bcb0f5b63bfef3bb | /script15.py | 443a772f633bd2334e704eeac1f382924dbc5e23 | [] | no_license | Moandh81/w3ressources_python | d9269959cc35c1df4a0ca9d37575c94fb96195f6 | 7a3c65bca50097c2e9b92591443dcb6b03a384a3 | refs/heads/master | 2020-03-30T22:42:23.673212 | 2019-11-11T19:58:16 | 2019-11-11T19:58:16 | 151,675,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | #!/usr/bin/python
# -*- coding: utf-8 -*
#Python Data Type: String - Exercises
#Write a Python function to get a string made of 4 copies of
#the last two characters of a specified string (length must be at least 2
# Print a string made of 4 copies of the last two characters of `chaine`
# (length must be at least 2).
chaine = "Hello world"
if len(chaine) >= 2:
    # Idiomatic slice: the last two characters, repeated 4 times -> "ldldldld".
    print(chaine[-2:] * 4)
else:
    print("la chaine n'a pas plus de deux caracteres et par conséquent elle ne peut pas etre traitée ")
"anis.dhouieb@gmail.com"
] | anis.dhouieb@gmail.com |
5d0e40c90b389c948a09261dfa0d920b443dbb01 | df3acbc57da3462643288504c5f5c8ba00e142a6 | /DangDang_Books/dangdang/dangdang/settings.py | 1d82ea3fb7ff2f9b3fc9c766c0089c03cbbae342 | [] | no_license | tangzhen10/Python_Crawler | 7817c38b01410364e94f76694cb92826b0859400 | 18dfd7e755b163ce15f9acd0bc49e8c450ff6198 | refs/heads/master | 2020-07-27T08:28:13.245375 | 2019-09-04T11:04:23 | 2019-09-04T11:04:23 | 209,029,654 | 1 | 0 | null | 2019-09-17T10:55:10 | 2019-09-17T10:55:10 | null | UTF-8 | Python | false | false | 3,092 | py | # -*- coding: utf-8 -*-
# Scrapy settings for dangdang project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'dangdang'

SPIDER_MODULES = ['dangdang.spiders']
NEWSPIDER_MODULE = 'dangdang.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'dangdang (+http://www.yourdomain.com)'

# Obey robots.txt rules
# Deliberately disabled here so the spider is not blocked by robots.txt.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'dangdang.middlewares.DangdangSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'dangdang.middlewares.DangdangDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# Enable the project's item pipeline (priority 300; lower runs earlier).
ITEM_PIPELINES = {
   'dangdang.pipelines.DangdangPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"why957177569@163.com"
] | why957177569@163.com |
cbf52a5a72f51750a9828b5b79b7616c161ce741 | 1cd909991a97c12752d81e334659c573c3bb6652 | /cheritest/trunk/tests/mt/test_mt_rdhwr.py | 2a1403eacaf47d96ff7989d204bd617ee83ff27c | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-beri-hw-sw-1.0"
] | permissive | conan789123/beri | 47ce884d5bf5635ef5dd3e64adfe7735996384ad | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | refs/heads/master | 2020-03-28T05:57:17.172443 | 2017-03-27T17:21:10 | 2017-03-27T17:21:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | #-
# Copyright (c) 2014 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_mt_rdhwr(BaseBERITestCase):
    """BERI multithreading test: RDHWR of the user-local register."""

    @attr('mt')
    @attr('rdhwr')
    @attr('userlocal')
    def test_rdhwr_1(self):
        '''Test that the user-local register has a per-thread value'''
        # a0 is expected to be 0 after the assembly test ran; see the
        # companion .s file for how the per-thread values are compared.
        self.assertRegisterEqual(self.MIPS.a0, 0, "The user local register did not have a per-thread value")
| [
"cl-beri-discuss@lists.cam.ac.uk"
] | cl-beri-discuss@lists.cam.ac.uk |
331bef022949cda2d9ab90b645a7c857d91a7fd5 | abc4ad00b4f267e43954db01f6540282a5d0ffea | /code/export_inference_graph.py | 2b35b3534477d1b4549653aef6eb001111178eb3 | [] | no_license | yuanyuanzijin/flower-recognition | 0ed6ab7bbcf91779cefab77f924942071690fe0e | 971e43b4926c6b8b14e91c3b1ccbeea2d8d60334 | refs/heads/master | 2021-08-29T23:14:33.395849 | 2017-12-14T23:49:50 | 2017-12-14T23:49:50 | 114,340,517 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,953 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Saves out a GraphDef containing the architecture of the model.
To use it, run something like this, with a model name defined by slim:
bazel build tensorflow_models/slim:export_inference_graph
bazel-bin/tensorflow_models/slim/export_inference_graph \
--model_name=inception_v3 --output_file=/tmp/inception_v3_inf_graph.pb
If you then want to use the resulting model with your own or pretrained
checkpoints as part of a mobile model, you can run freeze_graph to get a graph
def with the variables inlined as constants using:
bazel build tensorflow/python/tools:freeze_graph
bazel-bin/tensorflow/python/tools/freeze_graph \
--input_graph=/tmp/inception_v3_inf_graph.pb \
--input_checkpoint=/tmp/checkpoints/inception_v3.ckpt \
--input_binary=true --output_graph=/tmp/frozen_inception_v3.pb \
--output_node_names=InceptionV3/Predictions/Reshape_1
The output node names will vary depending on the model, but you can inspect and
estimate them using the summarize_graph tool:
bazel build tensorflow/tools/graph_transforms:summarize_graph
bazel-bin/tensorflow/tools/graph_transforms/summarize_graph \
--in_graph=/tmp/inception_v3_inf_graph.pb
To run the resulting graph in C++, you can look at the label_image sample code:
bazel build tensorflow/examples/label_image:label_image
bazel-bin/tensorflow/examples/label_image/label_image \
--image=${HOME}/Pictures/flowers.jpg \
--input_layer=input \
--output_layer=InceptionV3/Predictions/Reshape_1 \
--graph=/tmp/frozen_inception_v3.pb \
--labels=/tmp/imagenet_slim_labels.txt \
--input_mean=0 \
--input_std=255 \
--logtostderr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.platform import gfile
from datasets import dataset_factory
from preprocessing import preprocessing_factory
from nets import nets_factory
slim = tf.contrib.slim

# Command-line flags controlling which network/dataset to export and where
# to write the resulting GraphDef.
tf.app.flags.DEFINE_string(
    'model_name', 'inception_v3', 'The name of the architecture to save.')

tf.app.flags.DEFINE_boolean(
    'is_training', False,
    'Whether to save out a training-focused version of the model.')

tf.app.flags.DEFINE_integer(
    'default_image_size', 224,
    'The image size to use if the model does not define it.')

tf.app.flags.DEFINE_string('dataset_name', 'imagenet',
                           'The name of the dataset to use with the model.')

tf.app.flags.DEFINE_integer(
    'labels_offset', 0,
    'An offset for the labels in the dataset. This flag is primarily used to '
    'evaluate the VGG and ResNet architectures which do not use a background '
    'class for the ImageNet dataset.')

tf.app.flags.DEFINE_string(
    'output_file', '', 'Where to save the resulting file to.')

tf.app.flags.DEFINE_string(
    'dataset_dir', '', 'Directory to save intermediate dataset files to')

FLAGS = tf.app.flags.FLAGS
if not FLAGS.output_file:
raise ValueError('You must supply the path to save to with --output_file')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default() as graph:
dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'validation',
FLAGS.dataset_dir)
preprocessing_name = FLAGS.model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=False)
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
is_training=FLAGS.is_training)
if hasattr(network_fn, 'default_image_size'):
image_size = network_fn.default_image_size
else:
image_size = FLAGS.default_image_size
# placeholder = tf.placeholder(name='input', dtype=tf.float32,
# shape=[1, image_size, image_size, 3])
placeholder = tf.placeholder(name='input', dtype=tf.string)
image = tf.image.decode_jpeg(placeholder, channels=3)
image = image_preprocessing_fn(image, image_size, image_size)
image = tf.expand_dims(image, 0)
network_fn(image)
graph_def = graph.as_graph_def()
with gfile.GFile(FLAGS.output_file, 'wb') as f:
f.write(graph_def.SerializeToString())
if __name__ == '__main__':
tf.app.run()
| [
"jinluyuan@vip.qq.com"
] | jinluyuan@vip.qq.com |
55f8f0d9a4d02a624dfc1d1931ebe55eca75f4a1 | c6ca3fd35bd0e36ab1c3427bd4dfd55fd8cff0f7 | /2020/october/1/11-Oct/mysql(17).py | fd61ec2fe274cb61f84bcc0724e5769b0c5bfbcc | [] | no_license | mohanbabu2706/testrepo | 23ae942d1af4bfbc31c9266daadfd8f9dce431a6 | 5d75d9c65f7174a7418cdc3d00580b99a11f67d0 | refs/heads/master | 2023-01-03T23:48:56.365958 | 2020-11-01T06:38:46 | 2020-11-01T06:38:46 | 300,142,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | import mysql.connector
# Tutorial script: connect to MySQL and list all customers sorted by name
# in descending order.  Credentials here are placeholders.
mydb = mysql.connector.connect(
  host = "localhost",
  user = "myusername",
  password = "mypassword",
  database = "mydatabase",
)

mycursor = mydb.cursor()

sql = "SELECT * FROM customers ORDER BY name DESC"

mycursor.execute(sql)

# fetchall() returns every row as a tuple.
myresult = mycursor.fetchall()

for x in myresult:
  print(x)
"noreply@github.com"
] | mohanbabu2706.noreply@github.com |
d4dc2c5f7f03abab95daa5989886ca06a7db4c6c | d7589054c9dbcccdfee4213fda2df10f249a60a8 | /blogposts/migrations/0002_auto_20190622_1254.py | 457e9049564ff51cde1fcc2835f3ba4312831161 | [] | no_license | Ruckaiya/djangoblog | aa3e16ce84f37a70b830a795acf450b04b5c5bca | a76c5d223477d29b391915c3778219a36f9f34ce | refs/heads/master | 2020-06-09T00:26:51.396663 | 2019-06-23T10:47:43 | 2019-06-23T10:47:43 | 193,334,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # Generated by Django 2.2.2 on 2019-06-22 06:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: redefines the `timestamp` field on
    # the LatestPost and Popular models as a plain DateTimeField (the
    # previous field definition lives in 0001_initial).
    dependencies = [
        ('blogposts', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='latestpost',
            name='timestamp',
            field=models.DateTimeField(),
        ),
        migrations.AlterField(
            model_name='popular',
            name='timestamp',
            field=models.DateTimeField(),
        ),
    ]
| [
"ruckaiya.awf5@gmail.com"
] | ruckaiya.awf5@gmail.com |
6e5c7d4574ac6bdf771974fca13d24252f6fbbfe | e5ba55ac56d2d07aeebd7253fbe5d186196c9a52 | /catkin_ws/catkin_ws/build/rosserial/rosserial_server/catkin_generated/pkg.installspace.context.pc.py | ae10992ff46064e55de2240a2ca888d8f0938951 | [] | no_license | masiro97/darrsm | 5305a3e7c1fba2635a4925b9e079f45b40162862 | b881d00427d2af5d75ca509a191e57f2890e1ece | refs/heads/master | 2021-05-10T21:57:17.760536 | 2018-01-20T15:13:56 | 2018-01-20T15:13:56 | 111,084,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Values injected from the catkin CMake template (pkg.context.pc.in); they
# describe the rosserial_server package for pkg-config (.pc) generation.
CATKIN_PACKAGE_PREFIX = ""
# Semicolon-separated CMake lists are split into Python lists (an empty
# string yields an empty list rather than ['']).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/cun/catkin_ws/install/include".split(';') if "/home/cun/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;rosserial_msgs;std_msgs;topic_tools".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_server"
PROJECT_SPACE_DIR = "/home/cun/catkin_ws/install"
PROJECT_VERSION = "0.7.6"
| [
"estrk7120@gmail.com"
] | estrk7120@gmail.com |
2fb1b577624a5c8248e2d3dda1345033e30fd705 | dbe7731552d8e6d1e63cc0f2e27d3810cc61f350 | /hyper_paras/hp_dqn_2015.py | d4c5e2d826930dfa598be045d574d07259694b32 | [] | no_license | ZhangRui111/rl_breakout_tf | 6bb3f57f2b1d52f196323916393234e8abb990ac | 04f259cd3c32eaffbad87fe1035b0f87c96127b0 | refs/heads/master | 2020-04-08T19:24:16.018734 | 2018-12-18T02:42:56 | 2018-12-18T02:42:56 | 159,653,713 | 1 | 1 | null | 2018-12-18T02:42:57 | 2018-11-29T11:12:04 | Python | UTF-8 | Python | false | false | 190 | py | from hyper_paras.base_hyper_paras import BaseHyperparameters
class Hyperparameters(BaseHyperparameters):
    """Hyper-parameter set for the DQN-2015 agent variant; every other
    setting is inherited unchanged from BaseHyperparameters."""
    def __init__(self):
        super().__init__()
        # Identifier selecting the DQN-2015 model variant downstream.
        self.model = 'dqn_2015'
| [
"zhangruisg111@163.com"
] | zhangruisg111@163.com |
6943c6920b94ac55fc64ba3c743795f09a5b7748 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2015_01_29_BC_eval/main.py | 9afe65371e8a594387bdee14b3c79e9fe46bed0a | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,530 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 17 16:46:35 2014
@author: nhpnp3
"""
import functions as rr
import numpy as np
import matplotlib.pyplot as plt
filename = 'Results_Ti64_Dream3D_XYdirLoad_210microns_9261el_AbqInp_AnisoLE_005_data_v2_01.vtk'
el = 21
tensor_ID = 1
## The tensorID determines the type of tensor data read from the .vtk file
## if tensorID == 0, we read the stress tensor
## if tensorID == 1, we read the strain tensor
## if tensorID == 2, we read the plastic strain tensor
compl = ['11','22','33','12','23','31']
compd = {'11':0,'22':4,'33':8,'12':1,'23':5,'31':6}
r_real = np.zeros([np.size(compl),el,el,el])
c = 0
for comp in compl:
compp = compd[comp]
r_temp = rr.read_vtk_tensor(filename = filename, tensor_id = tensor_ID, comp = compp)
r_real[c,...] = r_temp.reshape([el,el,el])
print compl
print np.mean(r_real[c,...])
c += 1
euler = rr.read_vtk_vector(filename).reshape([3,el,el,el])
for dispcomp in xrange(np.size(compl)):
plt.close(dispcomp)
## Plot slices of the response
plt.figure(num=dispcomp,figsize=[14,6])
plt.subplot(231)
ax = plt.imshow(euler[0,0,:,:], origin='lower', interpolation='none',
cmap='jet')
plt.colorbar(ax)
plt.title('Microstructure, slice 0')
plt.subplot(232)
ax = plt.imshow(euler[0,np.floor(0.5*el),:,:], origin='lower', interpolation='none',
cmap='jet')
plt.colorbar(ax)
plt.title('Microstructure, slice %s' % np.floor(0.5*el))
plt.subplot(233)
ax = plt.imshow(euler[0,el-1,:,:], origin='lower', interpolation='none',
cmap='jet')
plt.colorbar(ax)
plt.title('Microstructure, slice %s' % (el-1))
dmin = np.min(r_real[dispcomp,...])
dmax = np.max(r_real[dispcomp,...])
plt.subplot(234)
ax = plt.imshow(r_real[dispcomp,0,:,:], origin='lower', interpolation='none',
cmap='jet', vmin=dmin, vmax=dmax)
plt.colorbar(ax)
plt.title('Response, slice 0')
plt.subplot(235)
ax = plt.imshow(r_real[dispcomp,np.floor(0.5*el),:,:], origin='lower', interpolation='none',
cmap='jet', vmin=dmin, vmax=dmax)
plt.colorbar(ax)
plt.title('Response, slice %s' % np.floor(0.5*el))
plt.subplot(236)
ax = plt.imshow(r_real[dispcomp,el-1,:,:], origin='lower', interpolation='none',
cmap='jet', vmin=dmin, vmax=dmax)
plt.colorbar(ax)
plt.title('Response, slice %s' % (el-1)) | [
"noahhpaulson@gmail.com"
] | noahhpaulson@gmail.com |
60a813a4f05609691d978440ce055ad549e189da | 59de7788673ade984b9c9fbc33664a7cbdba67d3 | /res/scripts/client/messenger/proto/bw_chat2/errors.py | 08901cc5ca868928a6c33929c34e99ea139ac423 | [] | no_license | webiumsk/WOT-0.9.15-CT | 3fa24ab37a6c91b7073034afb2f355efa5b7fe36 | fbd194fbaa6bdece51c7a68fc35bbb5257948341 | refs/heads/master | 2020-12-24T21:27:23.175774 | 2016-05-01T13:47:44 | 2016-05-01T13:47:44 | 57,600,180 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 7,166 | py | # 2016.05.01 15:25:05 Střední Evropa (letní čas)
# Embedded file name: scripts/client/messenger/proto/bw_chat2/errors.py
from gui.Scaleform.locale.MESSENGER import MESSENGER as I18N_MESSENGER
from gui.Scaleform.locale.INGAME_GUI import INGAME_GUI as I18N_INGAME_GUI
from helpers import i18n, html
from helpers.time_utils import makeLocalServerTime
from messenger.proto.interfaces import IChatError
from messenger.proto.shared_errors import ChatCoolDownError, ClientActionError, I18nActionID, I18nErrorID, ChatBanError
from messenger_common_chat2 import MESSENGER_ACTION_IDS as _ACTIONS
from messenger_common_chat2 import MESSENGER_ERRORS as _ERRORS
from messenger_common_chat2 import MESSENGER_LIMITS as _LIMITS
def getChatActionName(actionID):
    """Return the localized display name of a chat action.

    Falls back to the raw internal action name when no i18n key exists.
    """
    actionName = _ACTIONS.getActionName(actionID)
    key = I18N_MESSENGER.chat_action(actionName)
    if key is None:
        return actionName
    return i18n.makeString(key)
def getBattleCommandExample(msgText):
    """Return the localized, HTML-escaped usage example for a battle command.

    Falls back to the raw *msgText* when no i18n key is registered for it.
    """
    key = I18N_INGAME_GUI.chat_example(msgText)
    if key is None:
        return msgText
    return html.escape(i18n.makeString(key))
def getChatErrorMessage(errorID, kwargs):
    """Build a localized chat error message for *errorID*.

    *kwargs* supplies the format arguments for the i18n template; when no
    i18n key exists the raw error name and kwargs are joined instead.
    """
    name = _ERRORS.getErrorName(errorID)
    key = I18N_MESSENGER.chat_error(name)
    if key is None:
        return '{0}\\{1}'.format(name, kwargs)
    return i18n.makeString(key, **kwargs)
class _BWChat2I18nError(I18nErrorID):
    """Error-ID wrapper resolved against the bw_chat2 MESSENGER_ERRORS table."""
    def getName(self):
        # Internal (non-localized) error name.
        return _ERRORS.getErrorName(self.errorID)
    def getI18nKey(self):
        # i18n key used to build the localized error message.
        return I18N_MESSENGER.chat_error(self.getName())
class _BWChat2I18nAction(I18nActionID):
    """Action-ID wrapper resolved against the bw_chat2 MESSENGER_ACTION_IDS table."""
    def getName(self):
        # Internal (non-localized) action name.
        return _ACTIONS.getActionName(self.actionID)
    def getI18nName(self):
        # Localized display name (falls back to the raw name).
        return getChatActionName(self.actionID)
class _ActionCoolDownError(ChatCoolDownError):
    """Cooldown error whose action is resolved via the bw_chat2 action table."""
    def createAction(self, actionID):
        return _BWChat2I18nAction(actionID)
class _BattleCommandError(IChatError):
    """Base error for battle chat commands; caches the command's localized
    usage example and its cooldown period."""
    __slots__ = ('_example', '_coolDown')
    def __init__(self, command):
        super(_BattleCommandError, self).__init__()
        # Localized usage example shown to the player in the message.
        self._example = getBattleCommandExample(command.msgText)
        self._coolDown = command.cooldownPeriod
class _BattleCommandCoolDownError(_BattleCommandError):
    """Cooldown error for a battle command; the message includes the example
    text and the remaining cooldown period."""
    def __init__(self, command):
        super(_BattleCommandCoolDownError, self).__init__(command)
        # NOTE(review): the base __init__ already copies cooldownPeriod;
        # this re-assignment is redundant but harmless.
        self._coolDown = command.cooldownPeriod
    def getMessage(self):
        return i18n.makeString(I18N_MESSENGER.CLIENT_ERROR_COMMANDINCOOLDOWN_LIMITED, self._example, self._coolDown)
class _BattleCommandGenericError(_BattleCommandError):
    """Generic failure for a battle command; the message shows the usage example."""
    def getMessage(self):
        return i18n.makeString(I18N_MESSENGER.CLIENT_ERROR_COMMAND_GENERIC_ERROR, strArg1=self._example)
class _SimpleActionError(ClientActionError):
    """Generic client-action error whose error and action objects are
    resolved via the bw_chat2 tables."""
    def createError(self, errorID):
        return _BWChat2I18nError(errorID)
    def createAction(self, actionID):
        return _BWChat2I18nAction(actionID)
class _AdminCommandError(IChatError):
    """Base error for admin chat commands; wraps a preformatted error text."""
    __slots__ = ('_error',)
    def __init__(self, error):
        super(_AdminCommandError, self).__init__()
        # Preformatted text inserted into the server error template below.
        self._error = error
    def getMessage(self):
        return i18n.makeString(I18N_MESSENGER.SERVER_ERRORS_CHATCOMMANDERROR_MESSAGE, error=self._error)
class _SimpleAdminCommandError(_AdminCommandError):
    """Admin-command error built from a MESSENGER_ERRORS code; by default
    the formatted message names the generic admin chat command action."""
    def __init__(self, errorID, kwargs = None):
        super(_SimpleAdminCommandError, self).__init__(getChatErrorMessage(errorID, kwargs or {'actionName': i18n.makeString(I18N_MESSENGER.CUSTOM_CLIENT_ACTION_ADMIN_CHAT_COMMAND)}))
class _AdminCommandI18nError(_AdminCommandError):
    """Admin-command error whose message comes from an explicit i18n key."""
    def __init__(self, keys, kwargs):
        super(_AdminCommandI18nError, self).__init__(i18n.makeString(keys, **kwargs))
class _AdminCommandCoolDownError(_AdminCommandError):
    """Cooldown error for admin commands; the message embeds the fixed
    client-side admin-command cooldown period."""
    def __init__(self):
        super(_AdminCommandCoolDownError, self).__init__(i18n.makeString(I18N_MESSENGER.CLIENT_ERROR_COMMAND_IN_COOLDOWN_WO_NAME, floatArg1=_LIMITS.ADMIN_COMMANDS_FROM_CLIENT_COOLDOWN_SEC))
def createCoolDownError(actionID):
    """Create the cooldown error appropriate for *actionID*.

    Admin and battle chat commands get their specialized error classes;
    any other action gets a generic _ActionCoolDownError with the known
    cooldown period for that action (or None when no limit applies).
    """
    if _ACTIONS.adminChatCommandFromActionID(actionID):
        return _AdminCommandCoolDownError()
    battle_command = _ACTIONS.battleChatCommandFromActionID(actionID)
    if battle_command:
        return _BattleCommandCoolDownError(battle_command)
    if _ACTIONS.isRateLimitedBroadcastFromClient(actionID):
        cool_down = _LIMITS.BROADCASTS_FROM_CLIENT_COOLDOWN_SEC
    elif actionID == _ACTIONS.FIND_USERS_BY_NAME:
        cool_down = _LIMITS.FIND_USERS_BY_NAME_REQUEST_COOLDOWN_SEC
    elif actionID == _ACTIONS.GET_VOIP_CREDENTIALS:
        cool_down = _LIMITS.VOIP_CREDENTIALS_REQUEST_COOLDOWN_SEC
    else:
        cool_down = None
    return _ActionCoolDownError(actionID, cool_down)
def createBroadcastError(args, broadcastID):
    """Map a failed chat broadcast to the matching error object.

    BUG FIX: the decompiled source was syntactically invalid -- the
    IN_CHAT_BAN branch had been collapsed into an ``and`` expression and
    left a dangling ``elif``.  Reconstructed as the obvious
    assert + if/elif/else chain.
    """
    errorID = args['int32Arg1']
    # Only rate-limited client broadcasts may produce this error.
    assert _ACTIONS.isRateLimitedBroadcastFromClient(broadcastID)
    if errorID == _ERRORS.IN_CHAT_BAN:
        # floatArg1 carries the (server-time) ban expiry, strArg1 the reason.
        error = ChatBanError(makeLocalServerTime(args['floatArg1']), args['strArg1'])
    elif errorID == _ERRORS.IN_COOLDOWN:
        error = _ActionCoolDownError(broadcastID, _LIMITS.BROADCASTS_FROM_CLIENT_COOLDOWN_SEC)
    else:
        error = _SimpleActionError(broadcastID, errorID)
    return error
def createAdminCommandError(args):
    """Map a failed admin chat command to the matching error object."""
    code = args['int32Arg1']
    if code == _ERRORS.IN_COOLDOWN:
        return _AdminCommandCoolDownError()
    return _SimpleAdminCommandError(code)
def createBattleCommandError(args, command):
    """Return the error for a failed battle chat command.

    Only cooldown and generic failures are surfaced; any other code
    yields None (no user-visible error).
    """
    code = args['int32Arg1']
    if code == _ERRORS.IN_COOLDOWN:
        return _BattleCommandCoolDownError(command)
    if code == _ERRORS.GENERIC_ERROR:
        return _BattleCommandGenericError(command)
    return None
def createVOIPError(args, actionID):
    """Map a VOIP action failure to a (error, logOnly) pair.

    ``error`` is an IChatError for cooldown failures, a plain log string
    for generic credential failures, or None; ``logOnly`` is True when
    the error should only be written to the log, not shown to the user.
    """
    errorID = args['int32Arg1']
    error, logOnly = None, False
    if actionID == _ACTIONS.GET_VOIP_CREDENTIALS:
        if errorID == _ERRORS.IN_COOLDOWN:
            error = _ActionCoolDownError(_ACTIONS.GET_VOIP_CREDENTIALS, _LIMITS.VOIP_CREDENTIALS_REQUEST_COOLDOWN_SEC)
        elif errorID == _ERRORS.GENERIC_ERROR:
            logOnly = True
            error = 'The player has received the error to the request for getting of voip credential. Perhaps voip connection to the server is lost, the server is reconnecting to voip.'
    return (error, logOnly)
def createSearchUserError(args):
    """Return the error for a failed find-users-by-name request, or None."""
    code = args['int32Arg1']
    if code == _ERRORS.IN_COOLDOWN:
        return _ActionCoolDownError(_ACTIONS.FIND_USERS_BY_NAME, _LIMITS.FIND_USERS_BY_NAME_REQUEST_COOLDOWN_SEC)
    if code in (_ERRORS.IS_BUSY, _ERRORS.WRONG_ARGS):
        return _SimpleActionError(_ACTIONS.FIND_USERS_BY_NAME, code)
    return None
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\messenger\proto\bw_chat2\errors.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:25:05 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
1d6d55a629040d2a4f5a21a40cb2d8c050150aa4 | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/nat/gateway/_show.py | 1f3a7021343df279377f28627289aacc60a9f5b9 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 7,741 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network nat gateway show",
)
class Show(AAZCommand):
    """Show details of a NAT gateway.
    :example: Show details of a NAT gateway.
        az network nat gateway show --resource-group MyResourceGroup --name MyNatGateway
    :example: Show NAT gateway using ID.
        az network nat gateway show --ids {GatewayId}
    """
    # Metadata consumed by the aaz framework: API version and the ARM
    # resource path template this command operates on.
    _aaz_info = {
        "version": "2022-01-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/natgateways/{}", "2022-01-01"],
        ]
    }
    def _handler(self, command_args):
        # Standard aaz command flow: run the operations, then shape the output.
        super()._handler(command_args)
        self._execute_operations()
        return self._output()
    # Cached argument schema, built lazily by _build_arguments_schema.
    _args_schema = None
    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="Name of the NAT gateway.",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema
    def _execute_operations(self):
        self.pre_operations()
        self.NatGatewaysGet(ctx=self.ctx)()
        self.post_operations()
    @register_callback
    def pre_operations(self):
        pass
    @register_callback
    def post_operations(self):
        pass
    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result
    # HTTP GET operation against the NAT gateway ARM endpoint.
    class NatGatewaysGet(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"
        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            return self.on_error(session.http_response)
        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}",
                **self.url_parameters
            )
        @property
        def method(self):
            return "GET"
        @property
        def error_format(self):
            return "ODataV4Format"
        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "natGatewayName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters
        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-01-01",
                    required=True,
                ),
            }
            return parameters
        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters
        def on_200(self, session):
            # Deserialize the response body into the command context so
            # _output can read it back as ctx.vars.instance.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )
        # Cached response schema, built once on first use.
        _schema_on_200 = None
        @classmethod
        def _build_schema_on_200(cls):
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.etag = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.id = AAZStrType()
            _schema_on_200.location = AAZStrType()
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.properties = AAZObjectType(
                flags={"client_flatten": True},
            )
            _schema_on_200.sku = AAZObjectType()
            _schema_on_200.tags = AAZDictType()
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.zones = AAZListType()
            properties = cls._schema_on_200.properties
            properties.idle_timeout_in_minutes = AAZIntType(
                serialized_name="idleTimeoutInMinutes",
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )
            properties.public_ip_addresses = AAZListType(
                serialized_name="publicIpAddresses",
            )
            properties.public_ip_prefixes = AAZListType(
                serialized_name="publicIpPrefixes",
            )
            properties.resource_guid = AAZStrType(
                serialized_name="resourceGuid",
                flags={"read_only": True},
            )
            properties.subnets = AAZListType(
                flags={"read_only": True},
            )
            public_ip_addresses = cls._schema_on_200.properties.public_ip_addresses
            public_ip_addresses.Element = AAZObjectType()
            _ShowHelper._build_schema_sub_resource_read(public_ip_addresses.Element)
            public_ip_prefixes = cls._schema_on_200.properties.public_ip_prefixes
            public_ip_prefixes.Element = AAZObjectType()
            _ShowHelper._build_schema_sub_resource_read(public_ip_prefixes.Element)
            subnets = cls._schema_on_200.properties.subnets
            subnets.Element = AAZObjectType()
            _ShowHelper._build_schema_sub_resource_read(subnets.Element)
            sku = cls._schema_on_200.sku
            sku.name = AAZStrType()
            tags = cls._schema_on_200.tags
            tags.Element = AAZStrType()
            zones = cls._schema_on_200.zones
            zones.Element = AAZStrType()
            return cls._schema_on_200
class _ShowHelper:
    """Helper class for Show"""
    # Cached sub-resource schema, shared by every reference and built once.
    _schema_sub_resource_read = None
    @classmethod
    def _build_schema_sub_resource_read(cls, _schema):
        # Reuse the cached schema when it has already been built.
        if cls._schema_sub_resource_read is not None:
            _schema.id = cls._schema_sub_resource_read.id
            return
        cls._schema_sub_resource_read = _schema_sub_resource_read = AAZObjectType()
        sub_resource_read = _schema_sub_resource_read
        # A sub-resource reference carries only an ARM resource id.
        sub_resource_read.id = AAZStrType()
        _schema.id = cls._schema_sub_resource_read.id
__all__ = ["Show"]
| [
"noreply@github.com"
] | Azure.noreply@github.com |
0749a6002445ed54ada4865f9d8ead88709f8e04 | 29487730c77ae4e875ed8f69d3aceef2103fc0a3 | /fatcatmap/models/abstract/event.py | c93f570669e0aaedbbc3a8c0174e8256b06fac65 | [] | no_license | sgammon/fatcatmap | 154665fdd9394e0802d1bce9d0d747f3f9bfe3b2 | 4c83d1bfc146f48ac2d1d6b240624a7ece57911e | refs/heads/master | 2021-03-30T15:41:15.837040 | 2014-12-09T20:38:50 | 2014-12-09T20:38:50 | 15,307,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | # -*- coding: utf-8 -*-
'''
fcm: abstract event models
'''
# stdlib
from datetime import datetime
# graph models
from .. import (Model,
describe)
@describe(abstract=True)
class Event(Model):
    ''' Specifies an action that occurred at a specific (or
    vague) single moment in time. '''
    # Timestamp of when the action occurred; indexed so events can be
    # queried/ordered by time (framework-specific (type, options) form).
    occurred = datetime, {'indexed': True}
| [
"sam@momentum.io"
] | sam@momentum.io |
311119c4a533fb84b1644a51b05a305f2728fa5b | bd58eb56167680e5c07c1e6e630c03f2f73a3647 | /replace_downloads/replace_downloads.py | ab5264c1faddd4658ae864b61571d3c4340698b5 | [] | no_license | denier1025/PycharmProjects | 40e7ac879993e2e4b0ad7b0c547790a2554406eb | 411f8ae04c4ba4f6835d2cd66b5490223a863b2f | refs/heads/master | 2020-03-27T06:14:39.127989 | 2018-09-10T19:51:24 | 2018-09-10T19:51:24 | 146,091,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,671 | py | #!/usr/bin/env python
import netfilterqueue
import scapy.all as scapy
import subprocess
import argparse
# TCP acknowledgement numbers of intercepted ".exe" requests; responses
# whose sequence number matches one of these get their payload replaced.
ack_list = []
def get_args():
    """Parse command-line options for the download interceptor.

    Returns the parsed options.  Exits with a usage error when required
    options are missing: a first chain name and a queue number are always
    required, and a second chain name is additionally required when the
    first chain is OUTPUT or INPUT.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c0", "--chain0", dest="chain_name0", help="Chain name: FORWARD, OUTPUT, INPUT etc.")
    parser.add_argument("-c1", "--chain1", dest="chain_name1", help="Chain name: FORWARD, OUTPUT, INPUT etc.")
    parser.add_argument("-qn", "--queue-num", dest="queue_num", help="Queue number: 0, 1, 3 etc.")
    options = parser.parse_args()
    if not options.chain_name0:
        parser.error("Please, specify a chain name, use --help for more info")
    elif not options.queue_num:
        parser.error("Please, specify a queue number, use --help for more info")
    else:
        # BUG FIX: the original tested ("OUTPUT" or "INPUT") == chain_name0,
        # which evaluates to "OUTPUT" == chain_name0 and never matched INPUT.
        if options.chain_name0 in ("OUTPUT", "INPUT"):
            if not options.chain_name1:
                parser.error("Please, specify a chain name, use --help for more info")
    return options
def presets_for_intercept_and_modify_packets(options):
    """Insert iptables NFQUEUE rules so matching packets are queued to us.

    Inserts a rule into the second chain first (when one was given), then
    into the first chain, both targeting the same NFQUEUE number.
    """
    if options.chain_name1:
        subprocess.call(["iptables", "-I", options.chain_name1, "-j", "NFQUEUE", "--queue-num", options.queue_num])
    subprocess.call(["iptables", "-I", options.chain_name0, "-j", "NFQUEUE", "--queue-num", options.queue_num])
def flush_presets():
    """Remove all iptables rules, restoring normal packet flow."""
    subprocess.call("iptables --flush", shell=True)
def set_load_link(packet, load_link):
    """Rewrite a scapy HTTP response into a 301 redirect to *load_link*.

    Deletes the IP length and the IP/TCP checksums so scapy recomputes
    them for the modified payload when the packet is re-serialized.
    """
    packet[scapy.Raw].load = "HTTP/1.1 301 Moved Permanently\r\nLocation: " + load_link + "\r\n\r\n"
    del packet[scapy.IP].len
    del packet[scapy.IP].chksum
    del packet[scapy.TCP].chksum
    return packet
def process_packet(packet):
    """NetfilterQueue callback: swap .exe downloads for a redirect.

    HTTP requests (dport 80) whose payload mentions ".exe" have their ack
    number recorded; the matching response (sport 80, seq equal to a
    recorded ack) is replaced with a 301 redirect to the payload server.
    Every packet is accepted (forwarded) either way.
    """
    scapy_packet = scapy.IP(packet.get_payload())
    if scapy_packet.haslayer(scapy.Raw):
        if scapy_packet[scapy.TCP].dport == 80:
            if ".exe" in scapy_packet[scapy.Raw].load:
                print("exe Request")
                ack_list.append(scapy_packet[scapy.TCP].ack)
        elif scapy_packet[scapy.TCP].sport == 80:
            if scapy_packet[scapy.TCP].seq in ack_list:
                ack_list.remove(scapy_packet[scapy.TCP].seq)
                print("Replacing file")
                # NOTE(review): redirect target is hard-coded to a local web
                # server; adjust the IP/path for other setups.
                modified_packet = set_load_link(scapy_packet, "http://10.0.2.15/files/evil.exe")
                packet.set_payload(str(modified_packet))
    packet.accept()
# Parse CLI options, install the NFQUEUE iptables rules, then process
# packets until the user interrupts with CTRL+C, at which point the
# iptables rules are flushed back to normal.
options = get_args()
presets_for_intercept_and_modify_packets(options)
try:
    queue = netfilterqueue.NetfilterQueue()
    queue.bind(int(options.queue_num), process_packet)
    queue.run()
except KeyboardInterrupt:
    print("\nDetecting 'CTRL+C'... Flushing IP-tables... Please wait...")
    flush_presets()
    print("IP-tables were flushing successfully!")
"root@localhost.localdomain"
] | root@localhost.localdomain |
f55d7cfc44bac29fc91d4cf46061f70481c026ed | e0b6f5bd451aa8af3273fbc948799637681342e1 | /scripts/wm_representation/functions/IEM/tools/Weights_matrix_LM_3items.py | 423fd194f89fcea7ee5da1725dfd4a8d6bacfb41 | [] | no_license | davidbestue/encoding | 6b304f6e7429f94f97bd562c7544d1fdccf7bdc1 | c27319aa3bb652b3bfc6b7340044c0fda057bc62 | refs/heads/master | 2022-05-05T23:41:42.419252 | 2022-04-27T08:34:52 | 2022-04-27T08:34:52 | 144,248,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,184 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 13:05:33 2019
@author: David
"""
import sys, os
path_tools = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
sys.path.insert(1, path_tools)
from tools import *
def Weights_matrix_LM_3items( training_data, training_angles ):
# no intercept
# no regressors scaling
# training_angles is a vector of [a1, a2, a3]
#####
start_train_weights = time.time()
#####
n_voxels = np.shape(training_data)[1]
### Expected activity from the model
M_model=[] #matrix of the activity from the model
for i in range(len(training_angles)):
channel_values1=f(training_angles[i][0]) #f #f_quadrant (function that generates the expectd reponse in each channel)
channel_values2=f(training_angles[i][1])
channel_values3=f(training_angles[i][2])
channel_values = np.array(channel_values1) + np.array(channel_values2) + np.array(channel_values3)
channel_values = list(channel_values)
M_model.append(channel_values)
M_model=pd.DataFrame(np.array(M_model)) # (trials, channel_activity)
channel_names = ['ch_' +str(i+1) for i in range(0, len(pos_channels))] #names of the channels
M_model.columns=channel_names
#### 2. Train the model and get matrix of weights
Matrix_weights=np.zeros(( n_voxels, len(pos_channels) )) # (voxels, channels) how each channels is represented in each voxel
for voxel_x in range(0, n_voxels): #train each voxel
# set Y and X for the GLM
Y = training_data[:, voxel_x] ## Y is the real activity
X = M_model #_zscored #M_model ## X is the hipothetycal activity
##
a = sm.OLS(Y, X )
resul = a.fit()
betas= resul.params
Matrix_weights[voxel_x, :]=betas
#Save the matrix of weights
Matrix_weights =pd.DataFrame(Matrix_weights) #convert the array to dataframe
end_train_weights = time.time()
process_train_weights = end_train_weights - start_train_weights
##print( 'Time train Weights: ' +str(process_train_weights))
Inter = False #intercept true or false
return Matrix_weights, Inter
| [
"davidsanchezbestue@hotmail.com"
] | davidsanchezbestue@hotmail.com |
7346dc2a4751f2adbec6fe72f886800613002c7e | b1bab1dca289b2447c0b7a6b2e41f6e61fc048df | /TEST/A형모의삼국지.py | 0f97c63d8257949edae6a7aa5415e57818df7f28 | [] | no_license | DongChanKIM2/Algorithm | bf267ddf4234f0da6552a559f43cac073e7ca133 | 2613462f65a2cda7e32c6f96cee2cd67733c9883 | refs/heads/main | 2023-08-24T10:46:12.330145 | 2021-10-11T06:31:56 | 2021-10-11T06:31:56 | 356,710,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,469 | py | #1. 땅따먹기 게임
# Rule 1. 산악지대는 무조건 0
# Rule 2. 주병력/보조병력으로 나뉘어져 이씀
# Rule 3. 공격 -> 지원 -> 보충 (1번나라 -> 2번나라 -> 3번나라) 병력이 없다면 skip
# Rule 4. 공격: 내 공격 turn일때 (인접한 상대 나라의 병력 * 5) < (내 나라 병력) 이면 1/4씩 보내서 공격
# 즉 무조건 침공가능할때만 공격하는 것임
# Rule 5. 지원:
# 5-1. 인접한 국가 중 다른나라가 없으면 인접한 지역으로 1/5 병력 보냄
# 5-2. 인접한 국가 중 다른나라가 있으면 5배 벙력 초과일때만 1/5 병력 보냄
# Rule 6.게임종료: 한 국가만 남았을 때
# 게임순서: 파 -> 초 -> 주
# Four-direction (down, up, right, left) offsets for neighbour scans.
dx = [1, -1, 0, 0]
dy = [0, 0, 1, -1]
# 순서대로 공격하면 공격인원이 나뉘게 됨
# 1. 적군기준으로 사방향탐색해서 공격하는 곳인지 정하고 (attack 함수)
# 2. 아군기준으로 공격을 한번에 해야함 (adjacent_cnt arr)
def attack(which_country):
    """Mark every enemy cell *which_country* can conquer this turn.

    A non-mountain enemy cell is marked in ``conquerd_arr`` when the
    summed troops of its four-directionally adjacent cells owned by
    *which_country* exceed five times the defending garrison.
    """
    # Reset the conquest marks from the previous turn.
    for i in range(N):
        for j in range(N):
            conquerd_arr[i][j] = 0
    for i in range(N):
        for j in range(N):
            # Find enemy-held cells that are not mountain terrain (0).
            if arr[i][j] != which_country and arr[i][j] != 0:
                cnt = 0
                for direction in range(4):
                    nx = i + dx[direction]
                    ny = j + dy[direction]
                    if nx >= N or ny >= N or nx < 0 or ny < 0:
                        continue
                    # Scan the enemy cell's four neighbours and total the
                    # attacker's adjacent troops.
                    if arr[nx][ny] == which_country:
                        cnt += military[nx][ny]
                if cnt > (military[i][j] * 5):
                    conquerd_arr[i][j] = 1
def send(which_country):
    """Execute the conquests marked by attack() for *which_country*.

    Pass 1 counts, per friendly cell, how many marked cells it sends
    troops to; pass 2 resolves each conquest by pooling a quarter of
    every adjacent friendly garrison; pass 3 subtracts the troops that
    were sent from their source cells and clears the work arrays.
    """
    for i in range(N):
        for j in range(N):
            adjacent_cnt_arr[i][j] = 0
    # Count how many attacks each friendly cell contributes troops to.
    for i in range(N):
        for j in range(N):
            if conquerd_arr[i][j] == 1:
                for direction in range(4):
                    nx = i + dx[direction]
                    ny = j + dy[direction]
                    if nx >= N or ny >= N or nx < 0 or ny < 0:
                        continue
                    if arr[nx][ny] == which_country:
                        adjacent_cnt_arr[nx][ny] += 1
    # Carry out the actual invasions.
    for i in range(N):
        for j in range(N):
            if conquerd_arr[i][j] == 1:
                send_military_cnt = 0
                for direction in range(4):
                    nx = i + dx[direction]
                    ny = j + dy[direction]
                    if nx >= N or ny >= N or nx < 0 or ny < 0:
                        continue
                    if adjacent_cnt_arr[nx][ny] > 0:
                        send_military_cnt += int(military[nx][ny] * (1/4))
                military[i][j] = send_military_cnt - military[i][j]
                # Transfer ownership of the cell...
                arr[i][j] = which_country
                # ...and record the surviving garrison.
                military[i][j] = int(military[i][j])
    # Subtract the troops each source cell sent away.
    for i in range(N):
        for j in range(N):
            if adjacent_cnt_arr[i][j] > 0:
                military[i][j] -= adjacent_cnt_arr[i][j] * int(military[i][j] * (1/4))
                # int(((adjacent_cnt_arr[i][j]/4) * military[i][j]))
                military[i][j] = int(military[i][j])
            adjacent_cnt_arr[i][j] = 0
            conquerd_arr[i][j] = 0
# 지원부터 하자(이거하고나서 보충)
# Rule 5. 지원:
# 5-1. 인접한 국가 중 다른나라가 없으면 인접한 지역으로 1/5 병력 보냄
# 5-2. 인접한 국가 중 다른나라가 있으면 5배 벙력 초과일때만 1/5 병력 보냄
# 아... 생각해보니까 5-1, 5-2를 순차적으로 진행하면 안되고 동시에 해야된다.. shit...
# temp_military를 만들어서 5-1, 5-2의 차이를 더해주고 temp_military를 합치는 방향으로 가는게 맞겟다
def advocate(which_country):
    """Redistribute support troops for *which_country* (rule 5).

    Rule 5-1: a cell surrounded only by friendly/mountain neighbours sends
    1/5 of its garrison to every adjacent friendly cell.
    Rule 5-2: a cell adjacent to an enemy sends 1/5 of its garrison to a
    friendly neighbour only when it has more than five times that
    neighbour's troops.  Both rules accumulate into ``temp_military`` so
    they apply simultaneously, then the deltas are merged into ``military``.
    """
    for i in range(N):
        for j in range(N):
            temp_military[i][j] = 0
    # Handle rules 5-1 and 5-2 in a single pass over the board.
    for i in range(N):
        for j in range(N):
            if arr[i][j] == which_country:
                flag = 0
                for direction in range(4):
                    nx = i + dx[direction]
                    ny = j + dy[direction]
                    if nx >= N or ny >= N or nx < 0 or ny < 0:
                        continue
                    if arr[nx][ny] == 0:
                        continue
                    if arr[nx][ny] != arr[i][j]:
                        flag = 1
                # Case 5-1: only friendly (or mountain) cells around.
                cnt = 0
                if flag == 0:
                    # Send 1/5 of the garrison to each friendly neighbour...
                    for direction in range(4):
                        nx = i + dx[direction]
                        ny = j + dy[direction]
                        if nx >= N or ny >= N or nx < 0 or ny < 0:
                            continue
                        if arr[nx][ny] == which_country:
                            temp_military[nx][ny] += int(military[i][j] * 0.2)
                            cnt += 1
                    # ...and subtract everything that was sent from this cell.
                    temp_military[i][j] -= int(military[i][j] * 0.2) * cnt
                    # print('---', int(military[i][j] * (0.2)))
                    # temp_military[i][j] = int(temp_military[i][j])
                # Case 5-2: at least one enemy cell is adjacent.
                elif flag == 1:
                    for direction in range(4):
                        nx = i + dx[direction]
                        ny = j + dy[direction]
                        if nx >= N or ny >= N or nx < 0 or ny < 0:
                            continue
                        if arr[nx][ny] == which_country:
                            if military[i][j] > military[nx][ny] * 5:
                                temp_military[nx][ny] += int(military[i][j] * 0.2)
                                # temp_military[nx][ny] = int(temp_military[nx][ny])
                                temp_military[i][j] -= int(military[i][j] * 0.2)
                                # temp_military[i][j] = int(temp_military[i][j])
    for i in range(N):
        for j in range(N):
            military[i][j] += temp_military[i][j]
def suppling():
    """Reinforce: add each cell's per-turn supply to its garrison."""
    for row in range(N):
        for col in range(N):
            military[row][col] += supply[row][col]
# Faction codes: Pa = 1, Cho = 2, Ju = 3 (0 = mountain terrain).
T = int(input())
for tc in range(T):
    N = int(input())
    arr = [list(map(int, input().split())) for _ in range(N)]
    military = [list(map(int, input().split())) for _ in range(N)]
    supply = [list(map(int, input().split())) for _ in range(N)]
    # Marks cells scheduled to be conquered this turn.
    conquerd_arr = [[0] * N for _ in range(N)]
    # Counts how many attacks each friendly cell contributes troops to.
    adjacent_cnt_arr = [[0] * N for _ in range(N)]
    # Accumulates support transfers so rules 5-1 and 5-2 apply simultaneously.
    temp_military = [[0] * N for _ in range(N)]
    ans = 0
    temp = 0
    first = 0
    second = 0
    third = 0
    zero = 0
    # Play turns (faction 1 -> 2 -> 3) until only one faction remains,
    # i.e. one faction's cells plus mountains cover the whole board.
    while True:
        if first + zero == N ** 2:
            break
        if second + zero == N ** 2:
            break
        if third + zero == N ** 2:
            break
        first = 0
        second = 0
        third = 0
        zero = 0
        execute = 0
        # Advance to the next faction, wrapping 3 back to 1.
        temp += 1
        if temp >= 4:
            temp -= 3
        idx = temp
        # Skip the attack/send/advocate phases when the faction owns no cells.
        for i in range(N):
            for j in range(N):
                if arr[i][j] == idx:
                    execute = 1
        if execute == 1:
            attack(idx)
            send(idx)
            advocate(idx)
        # Supply reinforcements arrive every turn regardless.
        suppling()
        # Re-count cell ownership for the termination check above.
        for i in range(N):
            for j in range(N):
                if arr[i][j] == 0:
                    zero += 1
                if arr[i][j] == 1:
                    first += 1
                if arr[i][j] == 2:
                    second += 1
                if arr[i][j] == 3:
                    third += 1
    # print(conquerd_arr)
    # print(adjacent_cnt_arr)
    # print(arr)
    # print(military)
    # print(temp_military)
    # Answer: total troops remaining on the board.
    for i in range(N):
        for j in range(N):
            ans += military[i][j]
    print('#{} {}'.format(tc+1, ans))
    # print(sum(military))
| [
"fromecha@gmail.com"
] | fromecha@gmail.com |
86dcef936a9635a57846cbb64347fec7aede84f9 | 562446b9c0b39dd74a079678fab235ad7f9b8fb2 | /enaml/wx/wx_group_box.py | cc12ceba9a849c5b309913381a5286bd49ec7dbe | [
"BSD-3-Clause"
] | permissive | jminardi/enaml | 2679bbb992101e07b1d780bd622c5ccb08cb329e | 78446e5163e026d9abf9568f99252615e9e2e380 | refs/heads/master | 2021-01-23T22:37:54.033419 | 2014-07-22T02:30:21 | 2014-07-22T02:30:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,127 | py | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import wx
from atom.api import Typed
from enaml.widgets.group_box import ProxyGroupBox
from .wx_container import WxContainer, wxContainer
# Map Enaml alignment strings to the corresponding wx alignment flags
# used when positioning the group box title.
WX_ALIGNMENTS = {
    'left': wx.ALIGN_LEFT,
    'center': wx.ALIGN_CENTER,
    'right': wx.ALIGN_RIGHT,
}
class wxGroupBox(wxContainer):
""" A wxContainer sublcass that implements GroupBox functionality.
"""
    def __init__(self, *args, **kwargs):
        """ Initialize a wxGroupBox.
        Parameters
        ----------
        *args, **kwargs
            The positional and keyword arguments to initialize a
            wxContainer.
        """
        super(wxGroupBox, self).__init__(*args, **kwargs)
        # Raw title text; SetTitle stores it padded with one space on each
        # side and GetTitle strips that padding back off.
        self._title = ''
        # Decoration widgets: the full box border, the flat divider line,
        # and the title label raised above them.
        self._border = wx.StaticBox(self)
        self._line = wx.StaticLine(self)
        self._label = wx.StaticText(self)
        self._label.Raise()
        # Cached best size of the label, refreshed whenever the title changes.
        self._label_size = self._label.GetBestSize()
        self._title_alignment = wx.ALIGN_LEFT
        self._flat = False
        # Set the panel to double buffered or suffer terrible
        # rendering artifacts
        self.SetDoubleBuffered(True)
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def GetAlignment(self):
""" Return the wx alignment flag for the current alignment
of the group box title.
"""
return self._title_alignment
def SetAlignment(self, alignment):
""" Set the alignment of the title of the group box. Should
be one of wx.ALIGN_LEFT, wx.ALIGN_RIGHT, wx.ALIGN_CENTER.
"""
self._title_alignment = alignment
self._update_layout()
def GetFlat(self):
""" Returns a boolean indicating whether the group box is using
a flat style.
"""
return self._flat
def SetFlat(self, flat):
""" Set whether or not the group box should be displayed using
a flat style.
"""
self._flat = flat
if flat:
self._border.Show(False)
self._line.Show(True)
else:
self._border.Show(True)
self._line.Show(False)
self._update_layout()
def GetTitle(self):
""" Return the current title text in the group box.
"""
# Undo the hack applied in SetTitle(...)
title = self._title
if title:
title = title[1:-1]
return title
def SetTitle(self, title):
""" Set the current title text in the group box.
"""
# A bit of a hack to give us a little padding around the label
if title:
title = ' %s ' % title
self._title = title
self._label.SetLabel(title)
self._label_size = self._label.GetBestSize()
if not title:
self._label.Show(False)
else:
self._label.Show(True)
self._update_layout()
def SetDimensions(self, x, y, width, height):
""" Overridden parent class method to synchronize the group
box decorations.
"""
super(wxGroupBox, self).SetDimensions(x, y, width, height)
self._update_layout()
def SetSize(self, size):
""" Overridden parent class method to synchronize the group
box decorations.
"""
super(wxGroupBox, self).SetSize(size)
self._update_layout()
def GetContentsMargins(self):
""" Get the contents margins for the group box.
These margins are computed empirically so that they look similar
to the margins provided by Qt on Windows.
Returns
-------
result : tuple
The top, right, bottom, and left margin values.
"""
label = self._label
height = label.GetCharHeight()
if not label.IsShown():
height /= 2
return (height, 1, 1, 1)
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _update_layout(self):
""" Synchronizes the drawing of the group box decorations with
the panel.
"""
if self._flat:
self._update_line_geometry()
else:
self._update_border_geometry()
self._update_title_geometry()
self.Refresh()
def _update_border_geometry(self):
""" Updates the geometry of the border.
"""
width, height = self.GetSizeTuple()
self._border.SetSizeWH(width, height)
def _update_line_geometry(self):
""" Updates the geometry of the line.
"""
y = self._label_size.GetHeight() / 2
width, _ = self.GetSizeTuple()
self._line.SetDimensions(0, y, width, 2)
def _update_title_geometry(self):
""" Updates the geometry of the title.
"""
label = self._label
flat = self._flat
align = self._title_alignment
text_width, _ = self._label_size
width, _ = self.GetSizeTuple()
# These offsets are determined empirically to look similar
# in form to Qt on Windows
if align == wx.ALIGN_LEFT:
x = 0 if flat else 8
label.Move((x, 0))
elif align == wx.ALIGN_RIGHT:
right = width
right -= 0 if flat else 8
x = right - text_width
label.Move((x, 0))
elif align == wx.ALIGN_CENTER:
label.CenterOnParent(dir=wx.HORIZONTAL)
else:
raise ValueError('Invalid title alignment %s' % align)
class WxGroupBox(WxContainer, ProxyGroupBox):
""" A Wx implementation of an Enaml ProxyGroupBox.
"""
#: A reference to the widget created by the proxy.
widget = Typed(wxGroupBox)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Creates the underlying QGroupBox control.
"""
self.widget = wxGroupBox(self.parent_widget())
def init_widget(self):
""" Initialize the underlying widget.
"""
super(WxGroupBox, self).init_widget()
d = self.declaration
self.set_title(d.title, cm_update=False)
self.set_flat(d.flat)
self.set_title_align(d.title_align)
#--------------------------------------------------------------------------
# Layout Handling
#--------------------------------------------------------------------------
@staticmethod
def margins_func(widget):
""" Get the current contents margins for the group box.
"""
return widget.GetContentsMargins()
#--------------------------------------------------------------------------
# ProxyGroupBox API
#--------------------------------------------------------------------------
def set_title(self, title, cm_update=True):
""" Update the title of the group box.
"""
if not cm_update:
self.widget.SetTitle(title)
return
widget = self.widget
old_margins = widget.GetContentsMargins()
widget.SetTitle(title)
new_margins = widget.GetContentsMargins()
if old_margins != new_margins:
self.margins_updated()
def set_flat(self, flat):
""" Updates the flattened appearance of the group box.
"""
self.widget.SetFlat(flat)
def set_title_align(self, align):
""" Updates the alignment of the title of the group box.
"""
wx_align = WX_ALIGNMENTS[align]
self.widget.SetAlignment(wx_align)
| [
"sccolbert@gmail.com"
] | sccolbert@gmail.com |
f43b483dfeb3751246fc8febb721f04195a9a24a | 7137161629a1003583744cc3bd0e5d3498e0a924 | /airflow/providers/amazon/aws/example_dags/example_eks_using_defaults.py | 12a80fb4e6e270cea1e79d99caefb113099997a8 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | jbampton/airflow | 3fca85975854eb916f16143b659a9119af143963 | dcfa14d60dade3fdefa001d10013466fe4d77f0d | refs/heads/master | 2023-05-25T22:31:49.104069 | 2021-09-18T19:18:32 | 2021-09-18T19:18:32 | 247,645,744 | 3 | 0 | Apache-2.0 | 2020-03-16T08:12:58 | 2020-03-16T08:12:57 | null | UTF-8 | Python | false | false | 3,917 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from os import environ
from airflow.models.dag import DAG
from airflow.providers.amazon.aws.hooks.eks import ClusterStates, NodegroupStates
from airflow.providers.amazon.aws.operators.eks import (
EKSCreateClusterOperator,
EKSDeleteClusterOperator,
EKSPodOperator,
)
from airflow.providers.amazon.aws.sensors.eks import EKSClusterStateSensor, EKSNodegroupStateSensor
from airflow.utils.dates import days_ago
CLUSTER_NAME = 'eks-demo'
NODEGROUP_SUFFIX = '-nodegroup'
NODEGROUP_NAME = CLUSTER_NAME + NODEGROUP_SUFFIX
ROLE_ARN = environ.get('EKS_DEMO_ROLE_ARN', 'arn:aws:iam::123456789012:role/role_name')
SUBNETS = environ.get('EKS_DEMO_SUBNETS', 'subnet-12345ab subnet-67890cd').split(' ')
VPC_CONFIG = {
'subnetIds': SUBNETS,
'endpointPublicAccess': True,
'endpointPrivateAccess': False,
}
with DAG(
dag_id='example_eks_using_defaults_dag',
schedule_interval=None,
start_date=days_ago(2),
max_active_runs=1,
tags=['example'],
) as dag:
# [START howto_operator_eks_create_cluster_with_nodegroup]
# Create an Amazon EKS cluster control plane and an EKS nodegroup compute platform in one step.
create_cluster_and_nodegroup = EKSCreateClusterOperator(
task_id='create_eks_cluster_and_nodegroup',
cluster_name=CLUSTER_NAME,
nodegroup_name=NODEGROUP_NAME,
cluster_role_arn=ROLE_ARN,
nodegroup_role_arn=ROLE_ARN,
# Opting to use the same ARN for the cluster and the nodegroup here,
# but a different ARN could be configured and passed if desired.
resources_vpc_config=VPC_CONFIG,
# Compute defaults to 'nodegroup' but is called out here for the purposed of the example.
compute='nodegroup',
)
# [END howto_operator_eks_create_cluster_with_nodegroup]
await_create_nodegroup = EKSNodegroupStateSensor(
task_id='wait_for_create_nodegroup',
cluster_name=CLUSTER_NAME,
nodegroup_name=NODEGROUP_NAME,
target_state=NodegroupStates.ACTIVE,
)
start_pod = EKSPodOperator(
task_id="run_pod",
pod_name="run_pod",
cluster_name=CLUSTER_NAME,
image="amazon/aws-cli:latest",
cmds=["sh", "-c", "ls"],
labels={"demo": "hello_world"},
get_logs=True,
# Delete the pod when it reaches its final state, or the execution is interrupted.
is_delete_operator_pod=True,
)
# [START howto_operator_eks_force_delete_cluster]
# An Amazon EKS cluster can not be deleted with attached resources.
# Setting the `force` to `True` will delete any attached resources before deleting the cluster.
delete_all = EKSDeleteClusterOperator(
task_id='delete_nodegroup_and_cluster', cluster_name=CLUSTER_NAME, force_delete_compute=True
)
# [END howto_operator_eks_force_delete_cluster]
await_delete_cluster = EKSClusterStateSensor(
task_id='wait_for_delete_cluster',
cluster_name=CLUSTER_NAME,
target_state=ClusterStates.NONEXISTENT,
)
create_cluster_and_nodegroup >> await_create_nodegroup >> start_pod >> delete_all >> await_delete_cluster
| [
"noreply@github.com"
] | jbampton.noreply@github.com |
81f504ac97ca0003faf28ebe4384c00b0c873734 | adc1dea2a624af5a3d0564083d521942db77e5cf | /knn_cb_popularity_single.py | 3cfaf45ca93d5b4e167325cc09b7adac20e18674 | [] | no_license | Hadryan/Music_recommender-2 | 7052ac999daf039b2c269ce30a19cf57a5bde3e4 | a92ec61eef679b5b848fa2199ff613412db43c0d | refs/heads/main | 2023-04-24T01:11:57.877872 | 2021-05-25T17:47:28 | 2021-05-25T17:47:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,589 | py | # -*- coding: utf-8 -*-
"""
KNN Content-Based Recommendation
"""
# data science imports
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
import sklearn.preprocessing as skpp
# Scale and normalize data
def scale_norm(data_numerical):
# Remove row if there's NA values and convert to numpy array
data_knn = data_numerical.dropna(axis = 0, how = 'any')
data_knn = np.asarray(data_knn)
# scale data
data_knn = skpp.scale(data_knn, axis = 0)
# normalize data
stdA = np.std(data_knn,axis = 0)
stdA = skpp.normalize(stdA.reshape(1,-1)) # the normalize is different from MATLAB's
data_knn_scale = data_knn @ np.diag(np.ones(stdA.shape[1])/stdA[0])
# extract attributes from raw data
m,n = data_knn_scale.shape
# print('size of the data:',m,n)
return data_knn_scale
# Knn Recommendation
def knn_rank(model_knn, seed_track, data_id, data_knn_scale, num_neighbor):
distances, indices = model_knn.kneighbors(seed_track, n_neighbors = num_neighbor + 1)
# get list of raw idx of recommendations
raw_recommends = data_id.iloc[indices[0][1:],]
result = raw_recommends
return result, distances, indices
def split_numerical_id(track_data):
# Extract the numerical features only, for KNN analysis
data_numerical = track_data[['acousticness','danceability','energy','instrumentalness','key','liveness','loudness','mode','speechiness','tempo','valence','time_signature']]
# Extract the identidy data only, for outputs
data_id = track_data[['album_name','album_uri','artist_name','artist_uri','track_name','track_uri']]
return data_numerical, data_id
def get_knn_recommend(model_knn, seed_id, data_id, data_knn_scale, num_neighbor):
# Predict using KNN
seed_num = data_id[data_id.track_uri == seed_id].index[0]
seed_vector = data_knn_scale[seed_num].reshape(1, -1)
# Predict recommendation using KNN
result, distance, indices = knn_rank(model_knn, seed_vector, data_id, data_knn_scale, num_neighbor)
return result, distance
def knn_run(seed_id, num_neighbor):
# Read Data and drop duplicated track_uris
total_data = pd.read_csv('data_sample.csv').drop_duplicates('track_uri').reset_index(drop = True)
# split the input data into numerical features and identity features
data_numerical, data_id = split_numerical_id(total_data)
# scale and normalize the data
data_knn_scale = scale_norm(data_numerical)
data_id = data_id.reset_index(drop = True)
# print(data_knn_scale.shape)
# print(data_id.shape)
print('Input Seed:', data_id[data_id['track_uri']==seed_id][['artist_name', 'track_name']])
# Model Training
model_knn = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=num_neighbor, n_jobs=-1)
model_knn.fit(data_knn_scale)
# Model Predicting
result, distance = get_knn_recommend(model_knn, seed_id, data_id, data_knn_scale, num_neighbor)
result = result.reset_index(drop=True)
score = pd.DataFrame(1 - distance).T[1:].reset_index(drop=True)
output = pd.concat([result, score], axis =1).rename(columns={0:'score'})
return output
if __name__ == '__main__':
# Sample seed track_uri
seed_id = '6dr6QeqH62tYUiPezRbinq'
# Specify the number of neighbors for output
num_neighbor = 100
output = knn_run(seed_id, num_neighbor)
| [
"noreply@github.com"
] | Hadryan.noreply@github.com |
49eba96374a4a38078f28ee469f2bb8d525d67ea | 14e7058adf766352a0b90b66b7dcf887105a481c | /portal/messages/models.py | c11765e46bf0cd904a86461b20cf043a9c9591c9 | [
"BSD-2-Clause"
] | permissive | brunogamacatao/portalsaladeaula | 2b7f07f07c2518dd359f043483fbb27417f62aaf | 9429e485aa37ffea3208339a807032e9230a3c84 | refs/heads/master | 2020-12-29T01:42:18.594281 | 2012-06-22T12:24:44 | 2012-06-22T12:24:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,882 | py | # -*- coding: utf-8 -*-
from operator import attrgetter
__author__ = 'brunocatao'
import datetime
import logging
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from django.template import Context
from django.template.loader import get_template
from django.core.mail import EmailMultiAlternatives
from portal.models import UserInfo
class MessageManager(models.Manager):
def for_model(self, model):
"""
QuerySet for all updates for a particular model (either an instance or a class).
"""
ct = ContentType.objects.get_for_model(model)
qs = self.get_query_set().filter(content_type=ct)
if isinstance(model, models.Model):
qs = qs.filter(object_pk=force_unicode(model._get_pk_val()))
return qs
def get_replies(self, user):
qs = self.get_query_set().filter(author=user, is_reply=False)
messages = [msg for msg in list(qs.all()) if msg.replies.count() > 0]
return sorted(messages, key=attrgetter('earlier_date'), reverse=True)
class Message(models.Model):
subject = models.CharField(blank=False, max_length=100)
text = models.TextField(blank=False)
date_published = models.DateTimeField(default=datetime.datetime.now)
author = models.ForeignKey(User, blank=False)
is_reply = models.NullBooleanField(blank=True, null=True)
content_type = models.ForeignKey(ContentType, verbose_name=_('content type'), related_name="content_type_set_for_%(class)s")
object_pk = models.CharField(_('object ID'), max_length=100)
content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
objects = MessageManager()
def get_earlier_date(self):
earlier_date = self.date_published
if self.replies:
for reply in self.replies.all():
if reply.child.date_published > earlier_date:
earlier_date = reply.child.date_published
return earlier_date
earlier_date = property(get_earlier_date)
def fill_date_published(sender, instance, **kw):
if not instance.date_published:
instance.date_published = datetime.datetime.now()
models.signals.pre_save.connect(fill_date_published, sender=Message)
def invalidate_cache(sender, instance, **kw):
target = instance.content_type.get_object_for_this_type(pk=instance.object_pk)
target.messages_cache = None
target.save()
models.signals.pre_save.connect(invalidate_cache, sender=Message)
class ReplyRelationship(models.Model):
parent = models.ForeignKey(Message, related_name="replies")
child = models.ForeignKey(Message, related_name="parent")
#Signal processing
import traceback
from portal.messages.signals import massage_was_posted
def message_notification(sender, **kwargs):
message = kwargs['message']
target = message.content_type.get_object_for_this_type(pk=message.object_pk)
text = u'%s postou, %s:' % (message.author.get_profile().name, message.subject, )
text += '\n%s' % message.text
ctx = {
'mensagem': text,
'link': 'http://www.portalsaladeaula.com%s' % target.get_absolute_url(),
}
subject = message.subject
from_email = 'Portal Sala de Aula <gerencia@portalsaladeaula.com>'
text_content = get_template('emails/update.txt').render(Context(ctx))
html_content = get_template('emails/update.html').render(Context(ctx))
if isinstance(target, UserInfo) :
mail_to.append(target.email)
else:
if target.get_students():
for student in target.get_students():
msg = EmailMultiAlternatives(subject, text_content, from_email, [student.email, ])
msg.attach_alternative(html_content, "text/html")
try:
msg.send()
except:
logging.error('Não foi possível enviar o email')
traceback.print_exc()
if target.get_teachers():
for teacher in target.get_teachers():
msg = EmailMultiAlternatives(subject, text_content, from_email, [teacher.email, ])
msg.attach_alternative(html_content, "text/html")
try:
msg.send()
except:
logging.error('Não foi possível enviar o email')
traceback.print_exc()
massage_was_posted.connect(message_notification)
class Attachment(models.Model):
file = models.FileField(_('Attachment'), blank=False, upload_to='uploads/%Y/%m/%d/%H/%M/%S/')
message = models.ForeignKey(Message, blank=True, null=True) | [
"brunogamacatao@gmail.com"
] | brunogamacatao@gmail.com |
e38cc17d71bc92be9695f5c69c0a8dd781ea878d | 4ed3db861ae2fe727c7be604d42d540a00923320 | /samsung_multiroom/__init__.py | f5eaa8a8e92a00784ea09bf85a26da5fad2d46e8 | [
"MIT"
] | permissive | kusma/samsung_multiroom | 7cac147283a52bf491d7f50a6569c64de53eb4a5 | 09ca86d27b87a4aa0c97ec2accbd4ec67dd0cc61 | refs/heads/master | 2020-12-04T07:46:19.688568 | 2019-04-20T16:29:44 | 2019-04-20T16:29:44 | 231,683,383 | 0 | 0 | MIT | 2020-01-03T23:47:29 | 2020-01-03T23:47:28 | null | UTF-8 | Python | false | false | 254 | py | """
Init.
"""
# pylint: disable=C0103
from . import factory
from . import discovery
from .service import REPEAT_ONE, REPEAT_ALL, REPEAT_OFF
# aliases
SamsungMultiroomSpeaker = factory.speaker_factory
SamsungSpeakerDiscovery = discovery.SpeakerDiscovery
| [
"k.galutowski@gmail.com"
] | k.galutowski@gmail.com |
1952570b90f1800528a3b78b0f70da2e1c2b2478 | a8be4698c0a43edc3622837fbe2a98e92680f48a | /Programmers/Hash/위장.py | 7916695309725c283e78994ea36da5b2f1786f25 | [] | no_license | blueboy1593/algorithm | fa8064241f7738a12b33544413c299e7c1e1a908 | 9d6fdd82b711ba16ad613edcc041cbecadd85e2d | refs/heads/master | 2021-06-23T22:44:06.120932 | 2021-02-21T10:44:16 | 2021-02-21T10:44:16 | 199,543,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | from collections import defaultdict
def solution(clothes):
answer = 1
clothes_dict = defaultdict(list)
for cloth in clothes:
a, b = cloth
clothes_dict[b].append(a)
arr = []
for value in clothes_dict.values():
arr.append(len(value))
for ar in arr:
answer *= ar + 1
answer -= 1
return answer
solution([['yellow_hat', 'headgear'], ['blue_sunglasses', 'eyewear'], ['green_turban', 'headgear']]) | [
"snb0303@naver.com"
] | snb0303@naver.com |
f278e0b8a688262bf51f4581909942235236acfe | 116fa33c52c561b86cee2a43c6f3d18ff6496df5 | /setup.py | fba9817d3fbe164062eb48fff629060326c8b584 | [
"Apache-2.0"
] | permissive | famousthom/overseer | fc3c276a9b53c3fa56ab30fbf35341177f1e1a43 | 69e7a229450c4eed8721e5cfce6a98c708b50a95 | refs/heads/master | 2020-12-24T17:53:43.633263 | 2011-06-04T17:59:40 | 2011-06-04T17:59:40 | 1,847,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | #!/usr/bin/env python
try:
from setuptools import setup, find_packages
from setuptools.command.test import test
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from setuptools.command.test import test
class mytest(test):
def run(self, *args, **kwargs):
from runtests import runtests
runtests()
setup(
name='Overseer',
version='0.2.2',
author='DISQUS',
author_email='opensource@disqus.com',
url='http://github.com/disqus/overseer',
description = 'A status board built with Django',
packages=find_packages(),
zip_safe=False,
install_requires=[
'Django>=1.2.4',
'South',
'django-devserver',
'oauth2>=1.5.169',
'uuid',
],
test_suite = 'overseer.tests',
include_package_data=True,
cmdclass={"test": mytest},
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
) | [
"dcramer@gmail.com"
] | dcramer@gmail.com |
4e035dbd22d936b9cb74cb2496fab16767ca84fd | 35c73e0b545f6ceb791994db6e6c8ce3624885b9 | /config/deprecated_211014/cmssw_privateMC_TagAndProbe_Bp_MuNuDstst.py | 2ee8082fbc51311a1df9a56ce480e891b62ca128 | [] | no_license | ocerri/BPH_RDntuplizer | edb26d466352b831e53d6f18d6d1980074686c9b | a4b157f5a64473bf3db360019d55fa2217199015 | refs/heads/master | 2022-06-27T06:33:32.566956 | 2022-06-09T01:59:46 | 2022-06-09T01:59:46 | 167,600,038 | 2 | 6 | null | 2022-06-03T02:04:49 | 2019-01-25T19:14:04 | C++ | UTF-8 | Python | false | false | 4,742 | py | import os, sys
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
from Configuration.StandardSequences.Eras import eras
process = cms.Process('BPHRDntuplizer', eras.Run2_2018)
# import of standard configurations
process.load('FWCore.MessageService.MessageLogger_cfi')
# Needed for transient track builder
# process.load('Configuration.StandardSequences.Services_cff')
# process.load('Configuration.EventContent.EventContent_cff')
process.load("TrackingTools/TransientTrack/TransientTrackBuilder_cfi")
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
# process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_upgrade2018_realistic_v12', '')
'''
############ Command line args ################
'''
args = VarParsing.VarParsing('analysis')
args.register('inputFile', '', args.multiplicity.list, args.varType.string, "Input file or template for glob")
args.outputFile = ''
args.parseArguments()
'''
##################### Input ###################
'''
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(args.maxEvents)
)
from glob import glob
if args.inputFile:
if len(args.inputFile) == 1 and '*' in args.inputFile[0]:
flist = glob(args.inputFile[0])
else:
flist = args.inputFile
elif args.inputFiles:
if len(args.inputFiles) == 1 and args.inputFiles[0].endswith('.txt'):
with open(args.inputFiles[0]) as f:
flist = [l[:-1] for l in f.readlines()]
else:
flist = args.inputFiles
else:
fdefault = os.environ['CMSSW_BASE'] + '/src/ntuplizer/BPH_RDntuplizer/production/'
fdefault += 'inputFiles_BP_Tag_Bp_MuNuDstst_Hardbbbar_evtgen_ISGW2_PUc0_10-2-3.txt'
with open(fdefault) as f:
flist = [l[:-1] for l in f.readlines()]
flist = flist[:20]
for i in range(len(flist)):
if os.path.isfile(flist[i]):
flist[i] = 'file:' + flist[i]
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(tuple(flist)),
inputCommands=cms.untracked.vstring('keep *',
'drop GenLumiInfoHeader_generator__SIM'),
skipBadFiles=cms.untracked.bool(True)
)
process.source.duplicateCheckMode = cms.untracked.string('noDuplicateCheck')
'''
##################### Output ###################
'''
if args.outputFile == '.root':
outname = 'TagAndProbe_Bp2MuNuDstst_Pip_CAND.root'
elif args.outputFile.startswith('_numEvent'):
outname = 'TagAndProbe_Bp2MuNuDstst_Pip_CAND' + args.outputFile
else:
outname = args.outputFile
process.TFileService = cms.Service("TFileService",
fileName = cms.string(outname),
closeFileFast = cms.untracked.bool(True)
)
'''
################# Sequence ####################
'''
process.trgF = cms.EDFilter("TriggerMuonsFilter",
muon_charge = cms.int32(1),
verbose = cms.int32(0)
)
process.B2MuDstDT = cms.EDProducer("TagAndProbeBp2DststMuProducer",
trgMuons = cms.InputTag("trgF","trgMuonsMatched", ""),
charge_muon = cms.int32(+1),
charge_K = cms.int32(+1),
charge_pi = cms.int32(-1),
charge_pip = cms.int32(+1),
verbose = cms.int32(0)
)
process.B2MuDstDTFilter = cms.EDFilter("B2DstMuDecayTreeFilter",
verbose = cms.int32(0)
)
cfg_name = os.path.basename(sys.argv[0])
f = open(os.environ['CMSSW_BASE']+'/src/ntuplizer/BPH_RDntuplizer/.git/logs/HEAD')
commit_hash = f.readlines()[-1][:-1].split(' ')[1]
process.outA = cms.EDAnalyzer("FlatTreeWriter",
cmssw = cms.string(os.environ['CMSSW_VERSION']),
cfg_name = cms.string(cfg_name),
commit_hash = cms.string(commit_hash),
verbose = cms.int32(0)
)
process.p = cms.Path(
process.trgF +
process.B2MuDstDT +
process.B2MuDstDTFilter+
process.outA
)
# DEBUG -- dump the event content
# process.output = cms.OutputModule(
# "PoolOutputModule",
# fileName = cms.untracked.string('edm_output.root'),
# )
# process.output_step = cms.EndPath(process.output)
#
# process.schedule = cms.Schedule(
# process.p,
# process.output_step)
'''
############# Overall settings ################
'''
process.MessageLogger.cerr.FwkReport.reportEvery = 100
| [
"olmo.cerri@gmail.com"
] | olmo.cerri@gmail.com |
227c7a0d62baf387c75ed7a38cd0abc217cc728d | 84156ccfb34bd133b4e050e07c165ffe4030c39e | /avocado/export/__init__.py | 7fd0c7ab061c1daa38661335d8e5e48817284ed1 | [
"BSD-3-Clause"
] | permissive | leipzig/avocado | ae4482585683585dd41e9e2ecd2d0e12e649aaa7 | 930bc77371d609e05691508e544e5d97a090cfd8 | refs/heads/master | 2021-01-16T19:59:05.140028 | 2012-12-15T01:49:38 | 2012-12-15T01:49:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | from avocado.core import loader
from avocado.conf import OPTIONAL_DEPS
from _csv import CSVExporter
from _sas import SasExporter
from _r import RExporter
from _json import JSONExporter
from _html import HTMLExporter
registry = loader.Registry(register_instance=False)
registry.register(CSVExporter, 'csv')
registry.register(SasExporter, 'sas')
registry.register(RExporter, 'r')
registry.register(JSONExporter, 'json')
registry.register(HTMLExporter, 'html')
if OPTIONAL_DEPS['openpyxl']:
from _excel import ExcelExporter
registry.register(ExcelExporter, 'excel')
loader.autodiscover('exporters')
| [
"b@devel.io"
] | b@devel.io |
28bae8f3274478cae27342b125169091f3c11d2f | 935c60a8fa9a2f8d0efed9b1123a0a75e4c28250 | /censusreporter/apps/census/templatetags/sumlevs.py | 5eabc22d9189c83245fc75d7a4370fdf562a8e1b | [
"MIT"
] | permissive | censusreporter/censusreporter | a5299aebbec51d0508bdea6a90415ad2d724a2a5 | e8418d657b546482a80e92c6cd17332ab22c40c0 | refs/heads/master | 2023-08-31T23:10:43.506418 | 2023-08-25T16:05:46 | 2023-08-25T16:05:46 | 10,183,514 | 694 | 123 | MIT | 2023-02-15T19:03:00 | 2013-05-20T22:46:00 | HTML | UTF-8 | Python | false | false | 495 | py | from django import template
from census.utils import SUMMARY_LEVEL_DICT
register = template.Library()
@register.filter
def sumlev_name(sumlev):
if SUMMARY_LEVEL_DICT[sumlev]:
return SUMMARY_LEVEL_DICT[sumlev]['name']
return ''
@register.filter
def sumlev_name_plural(sumlev):
if SUMMARY_LEVEL_DICT[sumlev]:
return SUMMARY_LEVEL_DICT[sumlev]['plural']
return ''
@register.filter
def list_cut(itemlist, term):
return [ i for i in itemlist if not i == term ]
| [
"ryan.a.pitts@gmail.com"
] | ryan.a.pitts@gmail.com |
cfe688f0835102af0855fbac30fec0aad5293bcf | 0eaf0d3f0e96a839f2ef37b92d4db5eddf4b5e02 | /abc237/b.py | 77a5fc4125ebfbad2fd73a68cdf2011e23c51827 | [] | no_license | silphire/atcoder | b7b02798a87048757745d99e8564397d1ca20169 | f214ef92f13bc5d6b290746d5a94e2faad20d8b0 | refs/heads/master | 2023-09-03T17:56:30.885166 | 2023-09-02T14:16:24 | 2023-09-02T14:16:24 | 245,110,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | h, w = map(int, input().split())
aa = [
list(map(int, input().split()))
for _ in range(h)
]
for x in range(w):
b = [aa[i][x] for i in range(h)]
print(' '.join(map(str, b))) | [
"silphire@gmail.com"
] | silphire@gmail.com |
240707e34591a5f96f21763228a52c2a2efccf8d | bdfd3889e1cc02f97b3e2dc0032ce0c9b59bf37e | /src/gork/contrib/gbase/context_processors.py | 63035d955323f3fdfe2e9991d70633f56e88470d | [
"MIT"
] | permissive | indexofire/gork | c85728953cfa9ab98c59b79a440d4e12212cbc4e | c5e172b896a51c15f358d3aabbcb66af837b54b2 | refs/heads/master | 2016-09-06T04:58:01.435002 | 2014-02-06T08:35:51 | 2014-02-06T08:35:51 | 9,260,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | # -*- coding: utf-8 -*-
from django.conf import settings
def site_info(request):
return {
'SITE_NAME': getattr(settings, 'SITE_NAME', None),
'CODE_AUTHOR': getattr(settings, 'CODE_AUTHOR', None),
'TEMPLATE_AUTHOR': getattr(settings, 'TEMPLATE_AUTHOR', None),
'SITE_URL': getattr(settings, 'SITE_URL', None),
'SITE_DESCRIPTION': getattr(settings, 'SITE_DESCRIPTION', None),
}
| [
"indexofire@gmail.com"
] | indexofire@gmail.com |
93d50406711a9575123d20b27934aaf6d01e5e66 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02982/s044044811.py | 26dc9cd58c1c9f23cda90d6764b4c3a68593bf42 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | import numpy as np
N,D = map(int,input().split())
x = np.array([[int(i) for i in input().split()] for _ in range(N)])
count = 0
for i in range(N):
for j in range(i+1,N):
if float.is_integer(np.linalg.norm(x[i][:]-x[j][:], ord=2)):
count += 1
print(count)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d970b1196ea3e6aef62e8c6538c5f9e14955d85d | fb4afc975c4df4ea59366e6602a1359dc484aaa4 | /silvio/check_eta2_distribution_fit.py | fb40f599a34d5e475593ab6954b6d7d0154c1fed | [] | no_license | silviodonato/DijetRootTreeAnalyzer | b63181dc526f76cc67319f5c5292136fac522071 | a47fe92d1a6d6e35b030606fe6ee837bb0a4c2ca | refs/heads/master | 2021-06-24T10:45:34.198060 | 2019-06-04T15:19:38 | 2019-06-04T15:19:38 | 114,103,862 | 0 | 0 | null | 2018-12-04T15:19:11 | 2017-12-13T09:56:24 | C | UTF-8 | Python | false | false | 7,096 | py | import ROOT
import array
redoPlot = True
'''
ROOT.gROOT.SetBatch(0)
canv2 = ROOT.TCanvas()
'''
colors = [
ROOT.kBlack,
ROOT.kYellow+1,
ROOT.kRed,
ROOT.kMagenta,
ROOT.kBlue,
ROOT.kCyan+1,
ROOT.kGreen+1,
ROOT.kOrange,
ROOT.kPink,
ROOT.kViolet,
ROOT.kAzure,
ROOT.kTeal,
ROOT.kSpring,
ROOT.kGray,
]
colors = colors * 20
#bins = range(200,800,50)
#bins = range(-3,3)
#bins = [i*1./10. for i in range(-25,26)]
bins = [i*1./10. for i in range(-25,26,2)]
#bins = [60,70,80,90]
ROOT.gStyle.SetOptStat(0)
#fileName = "../0683324A-6D0D-8443-A441-7FDF9D0CF9EC.root"
fileName = "../data_ntuple_CR_nocut.root"
#fileName = "../data_trig_eff_eta2.5.root"
#fileName = "../data_trig_eff_eta2.5_skim_40.root"
denTrigger = "abs(jet2_eta)<2.5 && abs(jet1_eta)<2.5 && isr_pt>50"
#denTrigger = "isr_pt>40"
#denTrigger = "isr_pt>=70 && HLT_CaloScoutingHT250"
preselect = denTrigger + "&& 1" #
title = "Jet2 eta"
varX = "jet2_eta"
varX_nbins, varX_min, varX_max = 30,-2.5,2.5
varX_title = "m_{jj}"
fit_min = 320
#######################################################
N = 4
dijet_eta_max = 3
#canv.SetTitle(title)
preselect += "&& (%s < %d)"%(varX,varX_max)
file_ = ROOT.TFile(fileName)
tree = file_.Get("tree")
if not type(tree)==ROOT.TTree:
tree = file_.Get("rootTupleTree/tree")
#tree.Draw("dijet_eta >> deta(100,0,%f)"%dijet_eta_max,"%s && dijet_mass>300"%(preselect) ,"")
#deta = ROOT.gDirectory.Get("deta")
#deta.Draw("HIST")
#x = array.array('d',[i*1./N for i in range(N)])
#y = array.array('d',[0 for i in range(N)])
#deta.GetQuantiles(N,y,x)
#bins = list(y)
#funct = ROOT.TF1("funct","pol4",0,3)
#deta.Fit(funct)
#funct.Draw("same")
#canv.SaveAs("histoMjj.root")
c2 = ROOT.TCanvas("c2","")
#c2.SetLogz()
import copy
g = ROOT.TGraph2D()
chi2 = {}
histos=[]
fits = []
for i in range(len(bins)-1):
preselect = denTrigger + "&& (jet1_eta)>%f && (jet1_eta)<%f"%(bins[i],bins[i+1]) #
tree.Draw("%s >> histo(%f,%f,%f)"%(varX,varX_nbins,varX_min,varX_max),"%s"%(preselect) ,"")
histo = ROOT.gDirectory.Get("histo")
histos.append(histo.Clone("%s < DijetMass < %s"%((round(bins[i],2)),round(bins[i+1],2))))
leg = ROOT.TLegend(0.52,0.7,0.9,0.9)
#leg.SetHeader("")
for i,histo in enumerate(histos):
histo.SetTitle("")
histo.GetXaxis().SetTitle("m(jj)")
histo.GetYaxis().SetTitle("AU")
histo.Sumw2()
histo.Scale(1./histo.Integral(-1,varX_nbins))
leg.AddEntry(histo,histo.GetName(),"l")
histo.SetLineColor(colors[i])
histo.SetLineWidth(2)
histo.SetMinimum(0)
histo.SetMaximum(2.*histo.GetMaximum())
# fit = ROOT.TF1("fit"+str(i),"gaus(0)+gaus(3)", varX_min, varX_max)
# fit.SetParameters(0.082, 2.7, -1, 0.03, 0.5, -2.0)
fit = ROOT.TF1("fit"+str(i),"gaus(0)", varX_min, varX_max)
fit.SetParameters(0.05, 0.01, 1.7)
fit.SetLineColor(colors[i])
fitr = histo.Fit(fit,"","",varX_min, varX_max)
fits.append(copy.deepcopy(fit))
print(fits[i].GetParameter(1))
# histos[-1].Fit(fit,"","",varX_min, varX_max)
for i,histo in enumerate(histos):
if i==0:
histo.Draw("ERR")
# histo.Draw("HIST,same")
else:
histo.Draw("ERR,same")
# histo.Draw("HIST,same")
print(fits[i].GetParameter(1))
fits[i].Draw("same")
#c2.SetLogy()
leg.Draw()
c2.SaveAs("eta2plotcheck.png")
c2.SaveAs("eta2plotcheck.pdf")
#g.Draw("LEGO")c2.SaveAs("plotDetacheck.png")
means = ROOT.TGraphErrors()
sigmas = ROOT.TGraphErrors()
for graph in [means,sigmas]:
npar = 2
if graph==means:
npar = 1
for i,fit in enumerate(fits):
val = (bins[i]+bins[i+1])/2
err = (float(bins[i+1])-bins[i])/2
graph.SetPoint(i,val,fit.GetParameter(npar))
graph.SetPointError(i,err,fit.GetParError(npar))
c3 = ROOT.TCanvas("c3")
means.Draw("APL")
means_fit = ROOT.TF1("means_fit","pol1")
means.Fit(means_fit)
c3.SaveAs("means.png")
c4 = ROOT.TCanvas("c4")
sigmas.Draw("APL")
sigmas_fit = ROOT.TF1("sigmas_fit","pol2")
sigmas_fit.SetParameters(2,0,-0.001)
sigmas.Fit(sigmas_fit)
c4.SaveAs("sigmas.png")
'''
import ROOT
import array
redoPlot = True
'''
ROOT.gROOT.SetBatch(0)
canv2 = ROOT.TCanvas()
'''
colors = [
ROOT.kBlack,
ROOT.kYellow+1,
ROOT.kRed,
ROOT.kMagenta,
ROOT.kBlue,
ROOT.kCyan+1,
ROOT.kGreen+1,
ROOT.kOrange,
ROOT.kPink,
ROOT.kViolet,
ROOT.kAzure,
ROOT.kTeal,
ROOT.kSpring,
ROOT.kGray,
]
#bins = range(200,800,50)
bins = range(-3,3)
#bins = [i/10. for i in range(-30,30)]
#bins = [60,70,80,90]
ROOT.gStyle.SetOptStat(0)
fileName = "../data_trig_eff_eta2.5.root"
#fileName = "../data_trig_eff_eta2.5_skim_40.root"
denTrigger = "1"
#denTrigger = "isr_pt>40"
#denTrigger = "isr_pt>=70 && HLT_CaloScoutingHT250"
preselect = denTrigger + "&& 1" #
title = "Jet2 eta"
varX = "jet2_eta"
varX_nbins, varX_min, varX_max = 30,-3,3
varX_title = "m_{jj}"
fit_min = 320
#######################################################
N = 4
dijet_eta_max = 3
#canv.SetTitle(title)
preselect += "&& (%s < %d)"%(varX,varX_max)
file_ = ROOT.TFile(fileName)
tree = file_.Get("tree")
if not type(tree)==ROOT.TTree:
tree = file_.Get("rootTupleTree/tree")
#tree.Draw("dijet_eta >> deta(100,0,%f)"%dijet_eta_max,"%s && dijet_mass>300"%(preselect) ,"")
#deta = ROOT.gDirectory.Get("deta")
#deta.Draw("HIST")
#x = array.array('d',[i*1./N for i in range(N)])
#y = array.array('d',[0 for i in range(N)])
#deta.GetQuantiles(N,y,x)
#bins = list(y)
#funct = ROOT.TF1("funct","pol4",0,3)
#deta.Fit(funct)
#funct.Draw("same")
#canv.SaveAs("histoMjj.root")
c2 = ROOT.TCanvas("c2","")
#c2.SetLogz()
import copy
g = ROOT.TGraph2D()
chi2 = {}
histos=[]
fits = []
for i in range(len(bins)-1):
preselect = denTrigger + "&& (jet1_eta)>%f && (jet1_eta)<%f"%(bins[i],bins[i+1]) #
tree.Draw("%s >> histo(%f,%f,%f)"%(varX,varX_nbins,varX_min,varX_max),"%s"%(preselect) ,"")
histo = ROOT.gDirectory.Get("histo")
histos.append(histo.Clone("%s < DijetMass < %s"%((round(bins[i],2)),round(bins[i+1],2))))
leg = ROOT.TLegend(0.52,0.7,0.9,0.9)
#leg.SetHeader("")
for i,histo in enumerate(histos):
histo.SetTitle("")
histo.GetXaxis().SetTitle("m(jj)")
histo.GetYaxis().SetTitle("AU")
histo.Sumw2()
histo.Scale(1./histo.Integral(-1,varX_nbins))
leg.AddEntry(histo,histo.GetName(),"l")
histo.SetLineColor(colors[i])
histo.SetLineWidth(2)
histo.SetMinimum(3E-2)
histo.SetMaximum(2.*histo.GetMaximum())
fit = ROOT.TF1("fit"+str(i),"gaus(0)+gaus(3)", varX_min, varX_max)
fit.SetParameters(0.082, 2.7, -1, 0.03, 0.5, -2.0)
fit.SetLineColor(colors[i])
histo.Fit(fit,"","",varX_min, varX_max)
fits.append(copy.deepcopy(fit))
print(fits[i].GetParameter(1))
# histos[-1].Fit(fit,"","",varX_min, varX_max)
for i,histo in enumerate(histos):
if i==0:
histo.Draw("ERR")
# histo.Draw("HIST,same")
else:
histo.Draw("ERR,same")
# histo.Draw("HIST,same")
print(fits[i].GetParameter(1))
fits[i].Draw("same")
#c2.SetLogy()
leg.Draw()
c2.SaveAs("eta2plotcheck.png")
c2.SaveAs("eta2plotcheck.pdf")
#g.Draw("LEGO")c2.SaveAs("plotDetacheck.png")
''' | [
"silvio.donato@cern.ch"
] | silvio.donato@cern.ch |
09d2b162d3aa555c6e0bdf0745efa0aa7326c043 | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-integrations/connectors/source-github/unit_tests/unit_test.py | e7e3adf6bf81d5be520adfda3c14e9fdddcb3859 | [
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 912 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from airbyte_cdk.sources.streams.http.auth import MultipleTokenAuthenticator
from source_github import SourceGithub
def test_single_token():
authenticator = SourceGithub()._get_authenticator({"access_token": "123"})
assert isinstance(authenticator, MultipleTokenAuthenticator)
assert ["123"] == authenticator._tokens
authenticator = SourceGithub()._get_authenticator({"credentials": {"access_token": "123"}})
assert ["123"] == authenticator._tokens
authenticator = SourceGithub()._get_authenticator({"credentials": {"personal_access_token": "123"}})
assert ["123"] == authenticator._tokens
def test_multiple_tokens():
authenticator = SourceGithub()._get_authenticator({"access_token": "123, 456"})
assert isinstance(authenticator, MultipleTokenAuthenticator)
assert ["123", "456"] == authenticator._tokens
| [
"noreply@github.com"
] | thomas-vl.noreply@github.com |
4e0405087a86e930b76e5c4543867e7bfb2d61ac | e21599d08d2df9dac2dee21643001c0f7c73b24f | /Others/Modules/networking/ftplib/ftplib_1.py | f35db835c9a52e0f8890ef3ecb682380536ab8d7 | [] | no_license | herolibra/PyCodeComplete | c7bf2fb4ce395737f8c67749148de98a36a71035 | 4ef7d2c3aec6d28a53eed0e649cdeb74df3d783b | refs/heads/master | 2022-07-17T05:39:03.554760 | 2020-05-03T07:00:14 | 2020-05-03T07:00:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | # coding=utf-8
import ftplib
ftp = ftplib.FTP("www.python.org")
ftp.login("anonymous", "ftplib-example-1")
print ftp.dir()
ftp.quit()
| [
"ijumper@163.com"
] | ijumper@163.com |
85ae1095cbcec3fa5bc1d2dbb7910389e4b86fd0 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_6/cshjam001/question4.py | eaf38914b99ae858c1fb572ee3dd0ae6cf19b467 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | marks=input('Enter a space-separated list of marks:\n')
marks_list=marks.split(' ')
marks_list = [int(i) for i in marks_list]
fail=0
third=0
low_second=0
up_second=0
first=0
for i in marks_list:
if i<50:
fail+=1
elif i<60:
third+=1
elif i<70:
low_second+=1
elif i<75:
up_second+=1
elif i>=75:
first+=1
print('1 |','X'*first,sep='')
print('2+|','X'*up_second,sep='')
print('2-|','X'*low_second,sep='')
print('3 |','X'*third,sep='')
print('F |','X'*fail,sep='')
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
c8087889a488b72efcbfe902393e594a35f0fbde | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/face/force/side/test_government.py | e3bae4e0b77290e42845a540c14f38df61fdd566 | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | # -*- coding: utf-8 -*-
import pandas as pd
import http.client, urllib.parse
from googletrans import Translator
# **********************************************
# *** Update or verify the following values. ***
# **********************************************
# Replace the subscriptionKey string value with your valid subscription key.
target_list = [ "bg", "zh-TW", "iw"]
def translate(text, target="zh-TW", mode='google'):
if mode == 'microsoft':
subscriptionKey = 'a5d3d77c3cf5eb42280cd4bca60e55c1'
host = 'api.microsofttranslator.com'
path = '/V2/Http.svc/Translate'
import xml.etree.ElementTree as ET
def get_suggestions (text, target):
params = '?to=' + target + '&text=' + urllib.parse.quote (text)
headers = {'fb54a99315ae023d88c6a52b004c2335': subscriptionKey}
conn = http.client.HTTPSConnection(host)
conn.request ("GET", path + params, None, headers)
response = conn.getresponse ()
ret = response.read ()
ret = ET.fromstring(ret).text
return ret
return get_suggestions((get_suggestions(text, target)), 'en')
else:
translator = Translator()
tmp = translator.translate(text, dest=target).text
return translator.translate(tmp).text
if __name__ == '__main__':
class_names = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
import pandas as pd
from tqdm import tqdm
train = pd.read_csv('input/train_clean.csv')
ln = len(train)
# while True:
# x = input()
# print(translate(x))
# print(translate(x, mode='microsoft'))
for i in tqdm(range(ln)):
row = train.iloc[i,:]
data = dict(row)
text = data['comment_text']
# assert type(text)==str
# urllib.parse.quote('')
# continue
# try:
data['comment_text'] = translate(text)
train = train.append(data, ignore_index=True)
# except Exception as e:
# print(text)
# print(e)
# try:
# data['comment_text'] = translate(text, mode='microsoft')
# train = train.append(data, ignore_index=True)
# except Exception as e:
# print(text)
# print(e)
print(train.shape)
train.to_csv('train_augmented.csv', index=False)
| [
"soric.matko@gmail.com"
] | soric.matko@gmail.com |
dfeb1a9ad400d0e5dbfd8aca2f98ca7ed2ab8cf7 | 98cd5ddf45a73aea64bbfac0c0104829d7231b81 | /T - Image + Square/info.py | 28870e30ab938ba5ac04edf47271a06dfd397f12 | [] | no_license | atheis4/ETC_Modes_Extra | 42508d523cfe632a3335e29f6e1e40af91df231b | d0ce221562105382a7a73cc6d280f4ad0eabf6f3 | refs/heads/master | 2022-04-04T11:15:07.335910 | 2020-01-03T20:27:32 | 2020-01-03T20:27:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | name = "T - Image + Square"
description = "Random Image Placement with Square Border"
knob1 = "Image Size"
knob2 = "Square Size"
knob3 = "Image Opacity"
knob4 = "Square Color"
released = "July 28 2017" | [
"media@critterandguitari.com"
] | media@critterandguitari.com |
78120becb61c39d95665cd156269b2bb021e6dca | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/580747_Two_quick_functions_object/recipe-580747.py | 537650eed6cc4cbf663fde4bce6deec57d9c17c5 | [
"MIT",
"BSD-2-Clause"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 2,535 | py | def oa(o):
for at in dir(o):
print at,
'''
Sample calls and output for oa() below:
# object attributes of a dict:
oa({})
__class__ __cmp__ __contains__ __delattr__ __delitem__ __doc__ __eq__ __format__
__ge__ __getattribute__ __getitem__ __gt__ __hash__ __init__ __iter__ __le__ __len__
__lt__ __ne__ __new__ __reduce__ __reduce_ex__ __repr__ __setattr__ __setitem__
__sizeof__ __str__ __subclasshook__ clear copy fromkeys get has_key items
iteritems iterkeys itervalues keys pop popitem setdefault update values viewitems
viewkeys viewvalues
# object attributes of a list:
oa([])
__add__ __class__ __contains__ __delattr__ __delitem__ __delslice__ __doc__ __eq__
__format__ __ge__ __getattribute__ __getitem__ __getslice__ __gt__ __hash__ __iadd__
__imul__ __init__ __iter__ __le__ __len__ __lt__ __mul__ __ne__ __new__
__reduce__ __reduce_ex__ __repr__ __reversed__ __rmul__ __setattr__ __setitem__
__setslice__ __sizeof__ __str__ __subclasshook__ append count extend index insert
pop remove reverse sort
# object attributes of an int:
oa(1)
__abs__ __add__ __and__ __class__ __cmp__ __coerce__ __delattr__ __div__ __divmod__
__doc__ __float__ __floordiv__ __format__ __getattribute__ __getnewargs__ __hash__
__hex__ __index__ __init__ __int__ __invert__ __long__ __lshift__ __mod__
__mul__ __neg__ __new__ __nonzero__ __oct__ __or__ __pos__ __pow__ __radd__ __rand__
__rdiv__ __rdivmod__ __reduce__ __reduce_ex__ __repr__ __rfloordiv__ __rlshift__
__rmod__ __rmul__ __ror__ __rpow__ __rrshift__ __rshift__ __rsub__ __rtruediv__
__rxor__ __setattr__ __sizeof__ __str__ __sub__ __subclasshook__ __truediv__
__trunc__ __xor__ bit_length conjugate denominator imag numerator real
'''
def oar(o):
for at in dir(o):
if not at.startswith('__') and not at.endswith('__'):
print at,
'''
# regular (meaning non-dunder) object attributes of a dict:
oar({})
clear copy fromkeys get has_key items iteritems iterkeys itervalues keys pop popitem
setdefault update values viewitems viewkeys viewvalues
# regular object attributes of an int:
oar(1)
bit_length conjugate denominator imag numerator real
# regular object attributes of a string:
oar('')
_formatter_field_name_split _formatter_parser capitalize center count decode encode
endswith expandtabs find format index isalnum isalpha isdigit islower isspace
istitle isupper join ljust lower lstrip partition replace rfind rindex rjust rpartition
rsplit rstrip split splitlines startswith strip swapcase title translate upper zfil
'''
| [
"betty@qburst.com"
] | betty@qburst.com |
117fe1342a3efe8540e633aa9a62d928ac69653d | 8d9318a33afc2c3b5ca8ac99fce0d8544478c94a | /Books/Casandra DB/DataStax/resources/spark/python/examples/wordcount.py | 52f53c6f8b231953db69166e09e3ccc8a4d5cd1c | [] | no_license | tushar239/git-large-repo | e30aa7b1894454bf00546312a3fb595f6dad0ed6 | 9ee51112596e5fc3a7ab2ea97a86ec6adc677162 | refs/heads/master | 2021-01-12T13:48:43.280111 | 2016-11-01T22:14:51 | 2016-11-01T22:14:51 | 69,609,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:ca373d7911f67e869369ad2f602e2739ff048b81941c922a74136867cf33a91d
size 1321
| [
"tushar239@gmail.com"
] | tushar239@gmail.com |
1c7e5a7390304c57008517d71539568ca946b451 | 0580861bd8b993ac92faec0ed88a339975d702c0 | /reagent/core/observers.py | 8d56984ae533afe510457644890fada46dbd303f | [
"BSD-3-Clause"
] | permissive | Sandy4321/ReAgent | 346094ae4c98121de5c54d504186f583de21daf0 | 0a387c1aeb922d242c705338fae9379becc82814 | refs/heads/master | 2023-07-17T01:27:17.762206 | 2021-08-19T03:15:15 | 2021-08-19T03:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,772 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import Any, Dict, Iterable, List, Optional
from reagent.core.tensorboardX import SummaryWriterContext
from reagent.core.tracker import Aggregator, Observer
logger = logging.getLogger(__name__)
class CompositeObserver(Observer):
"""
A composite observer which takes care of dispatching values to child observers
"""
def __init__(self, observers: Iterable[Observer]):
self.observers: Dict[str, List[Observer]] = {}
for observer in observers:
observing_keys = observer.get_observing_keys()
for key in observing_keys:
self.observers.setdefault(key, []).append(observer)
super().__init__(list(self.observers))
def update(self, key: str, value):
for observer in self.observers[key]:
observer.update(key, value)
class EpochEndObserver(Observer):
"""
Call the callback function with epoch # when the epoch ends
"""
def __init__(self, callback, key: str = "epoch_end"):
super().__init__(observing_keys=[key])
self.callback = callback
def update(self, key: str, value):
self.callback(value)
class ValueListObserver(Observer):
"""
Simple observer that collect values into a list
"""
def __init__(self, observing_key: str):
super().__init__(observing_keys=[observing_key])
self.observing_key = observing_key
self.values: List[Any] = []
def update(self, key: str, value):
self.values.append(value)
def reset(self):
self.values = []
class TensorBoardScalarObserver(Observer):
def __init__(self, key: str, logging_key: Optional[str]):
super().__init__(observing_keys=[key])
self.key = key
self.logging_key = logging_key or key
def update(self, key: str, value):
SummaryWriterContext.add_scalar(self.logging_key, value)
class IntervalAggregatingObserver(Observer):
def __init__(
self,
interval: Optional[int],
aggregator: Aggregator,
observe_epoch_end: bool = True,
):
self.key = aggregator.key
obs_keys = ["epoch_end"] if observe_epoch_end else []
obs_keys.append(self.key)
super().__init__(observing_keys=obs_keys)
self.iteration = 0
self.interval = interval
self.intermediate_values: List[Any] = []
self.aggregator = aggregator
def update(self, key: str, value):
if key == "epoch_end":
self.flush()
return
self.intermediate_values.append(value)
self.iteration += 1
# pyre-fixme[58]: `%` is not supported for operand types `int` and
# `Optional[int]`.
if self.interval and self.iteration % self.interval == 0:
logger.info(
"Aggregating values over the recent interval for %s at iteration %s; aggregator: %s",
self.key,
self.iteration,
self.aggregator.__class__.__name__,
)
self.aggregator(self.key, self.intermediate_values)
self.intermediate_values = []
def flush(self):
# We need to reset iteration here to avoid aggregating on the same data multiple
# times
logger.info(
f"Interval Agg. Flushing: {self.key}; iteration: {self.iteration}; "
f"aggregator: {self.aggregator.__class__.__name__}; points: {len(self.intermediate_values)}"
)
self.iteration = 0
if self.intermediate_values:
self.aggregator(self.key, self.intermediate_values)
self.intermediate_values = []
self.aggregator.flush()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
097955982d9a4cd85cd972d01f493310df17eb98 | 43206144544e89d1510f8f77c443735200222f27 | /demo_zinnia_bitly/settings.py | 721a88ca90133e533008ec7ec45dec0e08e651fe | [
"BSD-3-Clause"
] | permissive | SteveByerly/zinnia-url-shortener-bitly | e655aa4bb4b560a67a28c84c2ddf9126d4904f7b | 67bf0681e3d16bf27c05cc0a91229cdc1ad3fb8e | refs/heads/develop | 2021-01-21T08:38:20.303625 | 2015-04-20T23:23:46 | 2015-04-20T23:23:46 | 34,291,673 | 0 | 0 | null | 2015-04-20T23:13:25 | 2015-04-20T23:13:25 | null | UTF-8 | Python | false | false | 2,440 | py | """Settings for the zinnia-bitly demo"""
import os
gettext = lambda s: s
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {'default':
{'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'demo.db')}
}
TIME_ZONE = 'Europe/Paris'
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
SECRET_KEY = 'jo-1rzm(%sf)3#n+fa7j945yuv3(pt63abhi12_t7e^^5q8dyw'
USE_TZ = True
USE_I18N = True
USE_L10N = True
SITE_ID = 1
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', gettext('English')),
('fr', gettext('French')),
('de', gettext('German')),
('es', gettext('Spanish')),
('it', gettext('Italian')),
('nl', gettext('Dutch')),
('sl', gettext('Slovenian')),
('bg', gettext('Bulgarian')),
('hu', gettext('Hungarian')),
('cs', gettext('Czech')),
('sk', gettext('Slovak')),
('lt', gettext('Lithuanian')),
('ru', gettext('Russian')),
('pl', gettext('Polish')),
('eu', gettext('Basque')),
('he', gettext('Hebrew')),
('ca', gettext('Catalan')),
('tr', gettext('Turkish')),
('sv', gettext('Swedish')),
('hr_HR', gettext('Croatian')),
('pt_BR', gettext('Brazilian Portuguese')),
('fi_FI', gettext('Finnish (Finland)')),
('zh_CN', gettext('Simplified Chinese')),
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'demo_zinnia_bitly.urls'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'zinnia.context_processors.version',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sitemaps',
'django.contrib.comments',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.staticfiles',
'mptt',
'zinnia',
'tagging',
'django_bitly',
)
ZINNIA_URL_SHORTENER_BACKEND = 'zinnia_bitly'
BITLY_LOGIN = 'YOUR_LOGIN'
BITLY_API_KEY = 'YOUR_API_KEY'
| [
"fantomas42@gmail.com"
] | fantomas42@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.