blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb518cf4285b1538c6a12b8b2b0d80ae2cf354a2 | fab7b6e422b74424fb59398635f74faca9ff5a58 | /waimak_extended_boundry/model_and_NSMC_build/targets/well_bulding_script.py | f2f0b033d08be8757829d48282d0a4eece8d3768 | [] | no_license | hansonmcoombs/Waimakariri-Model-Ashley-to-Selwyn | c7a56a2ebd0d421c9679cb4a16ae319dfb2041b1 | c96c2663b010975ec08d42840fbc7970f3c2b085 | refs/heads/master | 2023-05-29T10:57:33.916912 | 2020-04-23T21:32:21 | 2020-04-23T21:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | # -*- coding: utf-8 -*-
"""
Author: mattH
Date Created: 25/07/2017 11:13 AM
"""
import pandas as pd
import numpy as np
import flopy_mh as flopy
#this script was passed to brioch for inclusion in the pest optimisation process
# the influx wells are presently set to 1 m3/s so the muliplier can range between 0 and 5
well_data = pd.read_csv()# set path
muliplier = 1 # set muliplier
well_data.loc[well_data.type=='lr_boundry_flux','flux'] *= muliplier
g = well_data.groupby(['layer', 'row', 'col'])
outdata = g.aggregate({'flux': np.sum}).reset_index()
outdata = outdata.rename(columns={'layer': 'k', 'row': 'i', 'col': 'j'}).to_records(False)
outdata = outdata.astype(flopy.modflow.ModflowWel.get_default_dtype())
#write into file | [
"hansonmcoombs@gmail.com"
] | hansonmcoombs@gmail.com |
781488f21981ce3a35f5823fd69ba77f85484509 | f82e67dd5f496d9e6d42b4fad4fb92b6bfb7bf3e | /scripts/client/gui/dialogsinterface.py | 01e537573c7b9639810015d345977ad9df740966 | [] | no_license | webiumsk/WOT0.10.0 | 4e4413ed4e7b00e22fb85d25fdae9400cbb4e76b | a84f536c73f86d9e8fab559e97f88f99f2ad7e95 | refs/heads/master | 2021-01-09T21:55:00.662437 | 2015-10-23T20:46:45 | 2015-10-23T20:46:45 | 44,835,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py | # Embedded file name: scripts/client/gui/DialogsInterface.py
from gui.Scaleform.Waiting import Waiting
from gui.battle_control import g_sessionProvider
from gui.shared import events, g_eventBus
from gui.shared.utils.decorators import dialog
from gui.shared.utils.functions import showInformationDialog, showConfirmDialog
from gui.Scaleform.daapi.view.dialogs import I18nInfoDialogMeta, I18nConfirmDialogMeta, DisconnectMeta
@dialog
def showDialog(meta, callback):
g_eventBus.handleEvent(events.ShowDialogEvent(meta, callback))
@dialog
def showI18nInfoDialog(i18nKey, callback, meta = None):
if g_sessionProvider.isBattleUILoaded():
customMsg = None
if meta is not None:
customMsg.getMessage()
showInformationDialog(i18nKey, callback, customMessage=customMsg, ns='battle')
else:
showDialog(I18nInfoDialogMeta(i18nKey, meta=meta), callback)
return
@dialog
def showI18nConfirmDialog(i18nKey, callback, meta = None, focusedID = None):
if g_sessionProvider.isBattleUILoaded():
customMsg = None
if meta is not None:
customMsg.getMessage()
showConfirmDialog(i18nKey, callback, customMessage=customMsg, ns='battle')
else:
showDialog(I18nConfirmDialogMeta(i18nKey, meta=meta, focusedID=focusedID), callback)
return
__ifDisconnectDialogShown = False
def showDisconnect(reason = None, isBan = False, expiryTime = None):
global __ifDisconnectDialogShown
if __ifDisconnectDialogShown:
return
Waiting.close()
def callback(_):
global __ifDisconnectDialogShown
__ifDisconnectDialogShown = False
__ifDisconnectDialogShown = True
showDialog(DisconnectMeta(reason, isBan, expiryTime), callback)
| [
"info@webium.sk"
] | info@webium.sk |
35356795d538348ce21e6b7b7d750bc012feb21d | 29da2ca6def1270be13a3096685a8e5d82828dff | /CIM14/IEC61970/Dynamics/TurbineGovernors/TurbineGovernor.py | 8395e78690c1c031b3d3c2f1fa248e1347a8fed4 | [
"MIT"
] | permissive | rimbendhaou/PyCIM | 75eb3bcd3729b2410c03f3d5c66d6f1e05e21df3 | d578bb0bf1af344342bd23344385ed9c06c2d0ee | refs/heads/master | 2022-04-28T01:16:12.673867 | 2020-04-16T02:19:09 | 2020-04-16T02:19:09 | 256,085,381 | 0 | 0 | MIT | 2020-04-16T02:15:20 | 2020-04-16T02:08:14 | null | UTF-8 | Python | false | false | 1,608 | py | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.PowerSystemResource import PowerSystemResource
class TurbineGovernor(PowerSystemResource):
"""The turbine-governor determines the mechanical power (Pm) supplied to the generator model
"""
def __init__(self, *args, **kw_args):
"""Initialises a new 'TurbineGovernor' instance.
"""
super(TurbineGovernor, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = []
_many_refs = []
| [
"rwl@thinker.cable.virginmedia.net"
] | rwl@thinker.cable.virginmedia.net |
599294b469224e38ff049081aca36eb3ce3d2e2f | bc441bb06b8948288f110af63feda4e798f30225 | /pipeline_sdk/model/resource_manage/filter_condition_pb2.py | 8ca58da50e2eaaea09cd76d058b1eaa644f6a04f | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,173 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: filter_condition.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pipeline_sdk.model.resource_manage import filter_data_source_pb2 as pipeline__sdk_dot_model_dot_resource__manage_dot_filter__data__source__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='filter_condition.proto',
package='resource_manage',
syntax='proto3',
serialized_options=_b('ZIgo.easyops.local/contracts/protorepo-models/easyops/model/resource_manage'),
serialized_pb=_b('\n\x16\x66ilter_condition.proto\x12\x0fresource_manage\x1a;pipeline_sdk/model/resource_manage/filter_data_source.proto\"\x93\x01\n\x0f\x46ilterCondition\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63ompare\x18\x02 \x01(\t\x12/\n\x04left\x18\x03 \x01(\x0b\x32!.resource_manage.FilterDataSource\x12\x30\n\x05right\x18\x04 \x01(\x0b\x32!.resource_manage.FilterDataSourceBKZIgo.easyops.local/contracts/protorepo-models/easyops/model/resource_manageb\x06proto3')
,
dependencies=[pipeline__sdk_dot_model_dot_resource__manage_dot_filter__data__source__pb2.DESCRIPTOR,])
_FILTERCONDITION = _descriptor.Descriptor(
name='FilterCondition',
full_name='resource_manage.FilterCondition',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='resource_manage.FilterCondition.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='compare', full_name='resource_manage.FilterCondition.compare', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='left', full_name='resource_manage.FilterCondition.left', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='right', full_name='resource_manage.FilterCondition.right', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=105,
serialized_end=252,
)
_FILTERCONDITION.fields_by_name['left'].message_type = pipeline__sdk_dot_model_dot_resource__manage_dot_filter__data__source__pb2._FILTERDATASOURCE
_FILTERCONDITION.fields_by_name['right'].message_type = pipeline__sdk_dot_model_dot_resource__manage_dot_filter__data__source__pb2._FILTERDATASOURCE
DESCRIPTOR.message_types_by_name['FilterCondition'] = _FILTERCONDITION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FilterCondition = _reflection.GeneratedProtocolMessageType('FilterCondition', (_message.Message,), {
'DESCRIPTOR' : _FILTERCONDITION,
'__module__' : 'filter_condition_pb2'
# @@protoc_insertion_point(class_scope:resource_manage.FilterCondition)
})
_sym_db.RegisterMessage(FilterCondition)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
1b6353b6528b8a595913455a59dab726fa53fa19 | 95eed88115075f7e1916a14de7497d05a12a9330 | /abc106d.py | f7b6d1f202fe22e2ff2f13477e4221a8bc71c642 | [] | no_license | ynagi2/atcoder | bdbbd030f1dd39e937b0872b028ce0f38372521e | e404f4500d837bfd6ca473aa2837f46ae71ad84a | refs/heads/master | 2022-04-29T12:48:44.229462 | 2022-04-22T15:04:50 | 2022-04-22T15:04:50 | 241,098,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | # pypyにする
def main():
n, m, Q = map(int, input().split())
lr = [[0]*(n+1) for _ in range(n+1)]
for _ in range(m):
l, r = map(int, input().split())
lr[l][r] += 1
p = []
for _ in range(Q):
_list = list(map(int, input().split()))
p.append(_list)
sums = []
for l in lr:
csum = [0]*(n+2)
for i in range(n):
# lrは0~nで始めているので,今回はl[i+1]で足す
csum[i+1] = csum[i] + l[i+1]
sums.append(csum)
for e in p:
ans = 0
l, r = e[0], e[1]
# 与えられた区間内での計算
for c in sums[l:r+1]:
ans += (c[r] - c[l-1])
print (ans)
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | ynagi2.noreply@github.com |
b0df1d3b85cb9522c935158901ae2bddeaecc037 | a2e1ade428b26fd9009d1ab9e6594f06755ec5a4 | /src/saas/bkuser_shell/categories/serializers.py | 7fce74839512a4fc3584e570c807f34b56a1f0c0 | [
"MIT"
] | permissive | luyouli/bk-user | f285c5ee415cfc8769727b16e3d75ecce3b469d5 | 8ea590958a5c6dd3c71d0b72e1d4866ce327efda | refs/heads/master | 2023-08-07T20:58:36.429072 | 2021-08-24T07:02:32 | 2021-08-24T07:02:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,716 | py | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from bkuser_shell.bkiam.serializers import AuthInfoSLZ
from bkuser_shell.categories.constants import CategoryStatus
from django.utils.translation import ugettext_lazy as _
from rest_framework.serializers import (
BooleanField,
CharField,
ChoiceField,
DateTimeField,
FileField,
IntegerField,
JSONField,
ListField,
Serializer,
SerializerMethodField,
)
class ExtraInfoSLZ(Serializer):
auth_infos = ListField(read_only=True, child=AuthInfoSLZ())
callback_url = CharField(read_only=True)
class CategoryMetaSLZ(Serializer):
"""用户目录基本信息"""
type = CharField(read_only=True)
description = CharField(read_only=True)
name = CharField(read_only=True)
authorized = BooleanField(read_only=True)
extra_info = ExtraInfoSLZ(read_only=True)
class DetailCategorySerializer(Serializer):
id = IntegerField(required=False)
domain = CharField()
display_name = CharField()
default = BooleanField()
enabled = BooleanField()
type = CharField()
description = CharField()
create_time = DateTimeField()
update_time = DateTimeField()
last_synced_time = DateTimeField()
unfilled_namespaces = JSONField()
configured = BooleanField()
activated = SerializerMethodField()
def get_activated(self, obj) -> bool:
if isinstance(obj, dict):
return obj["status"] == CategoryStatus.NORMAL.value
else:
return getattr(obj, "status") == CategoryStatus.NORMAL.value
class CreateCategorySerializer(Serializer):
domain = CharField(max_length=64, label=_("登陆域"))
display_name = CharField(max_length=64, label=_("目录名"))
activated = BooleanField(default=True)
type = ChoiceField(default="local", choices=["mad", "ldap", "local"])
class UpdateCategorySerializer(Serializer):
display_name = CharField(max_length=64, required=False)
activated = BooleanField(default=True, required=False)
description = CharField(required=False)
class ListCategorySerializer(Serializer):
only_enable = BooleanField(default=False)
class CategorySyncSerializer(Serializer):
file = FileField(required=False)
class CategoryTestConnectionSerializer(Serializer):
connection_url = CharField(required=False)
user = CharField(required=False)
password = CharField(required=False)
timeout_setting = IntegerField(required=False, default=120)
use_ssl = BooleanField(default=False, required=False)
class CategoryTestFetchDataSerializer(Serializer):
basic_pull_node = CharField(required=False)
user_filter = CharField(required=False)
organization_class = CharField(required=False)
user_group_filter = CharField(required=False)
class CategoryExportSerializer(Serializer):
department_ids = CharField()
def to_representation(self, instance):
data = super().to_representation(instance)
data["department_ids"] = data["department_ids"].split(",")
return data
| [
"bluesedenyu@gmail.com"
] | bluesedenyu@gmail.com |
a9b4edb83b779b911c53957fae262072b357d724 | c426f269e8f7598d78b0a3bcc5629bfe447d12f6 | /4_Interactive Programming Python (RICE)/MiniProjects/MiniProject3_StopWatch/MiniProject3_v02_user12_DNqZOhd30O_8.py | 322dcf356f777ff0c51ef0d4d02f84a38c735015 | [] | no_license | DmitryVakhrushev/Python | 8d05d083f63822622f43ea5d873b98ef4e8cfd15 | 9dd2f37bcdce25a5cc0146adb4513ed2e539b650 | refs/heads/master | 2022-11-13T18:39:44.131820 | 2020-06-28T02:29:17 | 2020-06-28T02:29:17 | 243,847,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | # template for "Stopwatch: The Game"
import simplegui
import time
# define global variables
curTime = 0
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(t):
minutes = str(t // 600)
if ((t - (t//600)*600) // 10) < 10:
seconds = "0" + str((t - (t//600)*600) // 10)
else:
seconds = str((t - (t//600)*600) // 10)
millis = str(t%10)
conv = minutes + ":" + seconds + "." + millis
return conv
# define event handlers for buttons; "Start", "Stop", "Reset"
def startBtn():
tm.start()
def stopBtn():
tm.stop()
def resetBtn():
tm.stop()
global curTime
curTime = 0
# define event handler for timer with 0.1 sec interval
def tick():
global curTime
curTime +=1
# define draw handler
def drawTime(canvas):
global curTime
canvas.draw_text(format(curTime), (80, 120), 60, "White")
# create frame
f = simplegui.create_frame("Stopwatch", 300, 200)
# register event handlers
tm = simplegui.create_timer(100, tick)
f.set_draw_handler(drawTime)
start = f.add_button("Start", startBtn, 150)
stop = f.add_button("Stop", stopBtn, 150)
reset = f.add_button("Reset", resetBtn, 150)
# start frame
f.start()
tm.start()
# Please remember to review the grading rubric
| [
"dm.vakhrushev@gmail.com"
] | dm.vakhrushev@gmail.com |
145f4d44a7d58355228c02ba21b610bc4e85e637 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02619/s875095596.py | 54f8a5836b379689a3036484bd7fc2bf077aa557 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | import sys
def input():
return sys.stdin.readline()[:-1]
d = int(input())
c = list(map(int, input().split()))
C = sum(c)
s = [list(map(int, input().split())) for _ in range(d)]
t = [int(input()) for _ in range(d)]
def score(t):
res = 0
minus = 0
last = [0 for _ in range(26)]
for i, x in enumerate(t):
minus += C - c[x-1] * (i - last[x-1] + 1)
res -= minus
res += s[i][x-1]
last[x-1] = i+1
print(res)
return
score(t) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fbd1236ee2905d9e213d9ee829032acb53361778 | 20b4be7df5efeb8019356659c5d054f29f450aa1 | /API/gdax/__init__.py | 152d1b6d1e91f266693e8c11ceadc560286aec43 | [
"Apache-2.0",
"MIT"
] | permissive | kumars99/TradzQAI | 75c4138e30796573d67a5f08d9674c1488feb8e4 | 1551321642b6749d9cf26caf2e822051a105b1a5 | refs/heads/master | 2020-03-29T20:14:45.562143 | 2018-09-25T16:07:21 | 2018-09-25T16:07:21 | 150,302,554 | 1 | 0 | null | 2018-09-25T17:17:54 | 2018-09-25T17:17:54 | null | UTF-8 | Python | false | false | 174 | py | from .authenticated_client import AuthenticatedClient
from .public_client import PublicClient
from .websocket_client import WebsocketClient
from .order_book import OrderBook
| [
"awakeproduction@hotmail.fr"
] | awakeproduction@hotmail.fr |
f4269983761d21127d14240f5c8bf6a09d96ad3c | 05c70f2396e81328b1e8e1155994fccd52104fad | /databricks/notebooks/tools/mlflow_http_client.py | 14e17b5444eeec41c8a515e81d1abbf48a21d165 | [] | no_license | amesar/mlflow-fun | 0e015189e546f39b730375b292288bac1210fb88 | 31b575b97329e78bd9b0c062a270e7375f10e170 | refs/heads/master | 2023-05-13T04:55:57.367744 | 2022-11-28T16:24:12 | 2022-11-28T16:24:12 | 142,216,836 | 27 | 15 | null | 2023-05-09T18:06:50 | 2018-07-24T21:56:27 | Python | UTF-8 | Python | false | false | 1,420 | py | # Databricks notebook source
# MAGIC %md ### MlflowHttpClient - Requests Client for MLflow REST API
# MAGIC * See: https://mlflow.org/docs/latest/rest-api.html
# MAGIC * See notebook [test_mlflow_http_client](https://demo.cloud.databricks.com/#notebook/3652184) for usage
# COMMAND ----------
import requests
class MlflowHttpClient(object):
def __init__(self):
self.token = dbutils.notebook.entry_point.getDbutils().notebook().getContext().apiToken().get()
host_name = dbutils.notebook.entry_point.getDbutils().notebook().getContext().tags().get("browserHostName").get()
self.base_uri = "https://{}/api/2.0/preview/mlflow".format(host_name)
def get(self, path):
uri = self.create_uri(path)
rsp = requests.get(uri, headers={'Authorization': 'Bearer '+self.token})
self.check_response(rsp, uri)
return rsp.text
def post(self, path,data):
uri = self.create_uri(path)
rsp = requests.post(self.create_uri(path), headers={'Authorization': 'Bearer '+self.token}, data=data)
self.check_response(rsp, uri)
return rsp.text
def create_uri(self, path):
return "{}/{}".format(self.base_uri,path)
def check_response(self, rsp, uri):
if rsp.status_code < 200 or rsp.status_code > 299:
raise Exception("HTTP status code: {} Reason: '{}' URL: {}".format(rsp.status_code,rsp.reason,uri)) | [
"amesar@users.noreply.github.com"
] | amesar@users.noreply.github.com |
2b766768a039858cfcc739797460163dfe150e89 | 1651184ccacf43c6a87864d5f0e4b4ea5453b98c | /backend/users/migrations/0002_auto_20201219_1700.py | d0e8017c1750c87d4aa5fd91ad4684584641a8a0 | [] | no_license | crowdbotics-apps/the-jumper-app-23425 | b20ef6908e2c4c2269dfba6d109d246044e10a08 | 470b864f8a8ae9638075c734709ae2b9aed1c7b4 | refs/heads/master | 2023-01-31T04:53:08.499998 | 2020-12-19T18:18:34 | 2020-12-19T18:18:34 | 322,364,725 | 0 | 0 | null | 2020-12-19T20:19:10 | 2020-12-17T17:22:56 | Python | UTF-8 | Python | false | false | 1,275 | py | # Generated by Django 2.2.17 on 2020-12-19 17:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("users", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="user",
name="last_updated",
field=models.DateTimeField(auto_now=True, null=True),
),
migrations.AddField(
model_name="user",
name="timestamp_created",
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name="user",
name="email",
field=models.EmailField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name="user",
name="first_name",
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name="user",
name="last_name",
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name="user",
name="name",
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
18adf7ef8f6ea9f3c2adfaaf8f694d42fb2d2a72 | 3880497e60f93cec22b86e1d77cf68c6546b4c51 | /liyida2/settings.py | 53eb68e450a685fe0cc0eb04826a663bef63e6bc | [] | no_license | li-yi-da/liyida_blog | b63ac1bf2add1a6a7d4b2af0a8a2e07c3d5c89ea | 32306f84f45a4f633de2bef17621a4b09d3120f8 | refs/heads/master | 2020-05-16T03:39:22.664695 | 2019-04-26T09:57:40 | 2019-04-26T09:57:40 | 182,736,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,968 | py | """
Django settings for liyida2 project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^z$11%5(f#ktdyg^8c3qtkn_-rt73h7aoqaqdh=0jvx@_yh1zl'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app01',
'myblog',
'DjangoUeditor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'Middle.m1.R1',
# 'Middle.m1.R2',
# 'Middle.m1.R3',
]
ROOT_URLCONF = 'liyida2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'liyida2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'liyida2',
'USER': 'root',
'PASSWORD': 'LIYIDafei103540',
'HOST': '45.76.66.211',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL ='/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATICFILES_DIRS = (
os.path.join(BASE_DIR,'static'),
# os.path.join(BASE_DIR,'media'),
)
STATIC_ROOT = 'all_static_files'
import sys
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
sys.path.insert(0, os.path.join(BASE_DIR, 'extra_apps'))
| [
"root@vultr.guest"
] | root@vultr.guest |
6df7faaace8e0d5146130f9fa68b5334f410dd30 | 61296b98e4d481893db4bc51d75652c7109ae626 | /0000_examples/xym_rrtcplanning_exe.py | b5c6912ba577bb01987d0e8664f3507732e13f06 | [
"MIT"
] | permissive | Shogo-Hayakawa/wrs | 23d4560b1062cf103ed32db4b2ef1fc2261dd765 | 405f15be1a3f7740f3eb7d234d96998f6d057a54 | refs/heads/main | 2023-08-19T19:29:15.409949 | 2021-11-02T01:22:29 | 2021-11-02T01:22:29 | 423,663,614 | 0 | 0 | MIT | 2021-11-02T00:59:17 | 2021-11-02T00:59:17 | null | UTF-8 | Python | false | false | 2,732 | py | import math
import numpy as np
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import robot_sim.robots.xarm7_shuidi_mobile.xarm7_shuidi_mobile as xav
import motion.probabilistic.rrt_connect as rrtc
import robot_con.xarm_shuidi.xarm.xarm_client as xac
base = wd.World(cam_pos=[3, 1, 2], lookat_pos=[0, 0, 0])
gm.gen_frame().attach_to(base)
# object
object = cm.CollisionModel("./objects/bunnysim.stl")
object.set_pos(np.array([.85, 0, .37]))
object.set_rgba([.5,.7,.3,1])
object.attach_to(base)
# robot_s
component_name='arm'
robot_s = xav.XArm7YunjiMobile()
robot_s.fk(component_name, np.array([0, math.pi * 2 / 3, 0, math.pi, 0, -math.pi / 6, 0]))
# robot_x
robot_x = xac.XArm7(host="192.168.50.77:18300")
init_jnt_angles = robot_x.get_jnt_vlaues()
print(init_jnt_angles)
rrtc_planner = rrtc.RRTConnect(robot_s)
path = rrtc_planner.plan(start_conf=init_jnt_angles,
# goal_conf=np.array([math.pi/3, math.pi * 1 / 3, 0, math.pi/2, 0, math.pi / 6, 0]),
goal_conf = robot_s.manipulator_dict['arm'].homeconf,
obstacle_list=[object],
ext_dist= .1,
max_time=300,
component_name=component_name)
robot_x.move_jspace_path(path, time_interval=.1)
# print(path)
for pose in path:
# print(pose)
robot_s.fk(component_name, pose)
robot_meshmodel = robot_s.gen_meshmodel()
robot_meshmodel.attach_to(base)
# robot_meshmodel.show_cdprimit()
robot_s.gen_stickmodel().attach_to(base)
# hol1
# robot_s.hold(object, jawwidth=.05)
# robot_s.fk(np.array([0, 0, 0, math.pi/6, math.pi * 2 / 3, 0, math.pi, 0, -math.pi / 6, math.pi/6]))
# robot_meshmodel = robot_s.gen_meshmodel()
# robot_meshmodel.attach_to(base)
# robot_s.show_cdprimit()
# tic = time.time()
# result = robot_s.is_collided() # problematic
# toc = time.time()
# print(result, toc - tic)
# base.run()
# release
# robot_s.release(object, jawwidth=.082)
# robot_s.fk(np.array([0, 0, 0, math.pi/3, math.pi * 2 / 3, 0, math.pi, 0, -math.pi / 6, math.pi/6]))
# robot_meshmodel = robot_s.gen_meshmodel()
# robot_meshmodel.attach_to(base)
# robot_meshmodel.show_cdprimit()
# tic = time.time()
# result = robot_s.is_collided()
# toc = time.time()
# print(result, toc - tic)
#copy
# robot_instance2 = robot_s.copy()
# robot_instance2.move_to(pos=np.array([.5,0,0]), rotmat=rm.rotmat_from_axangle([0,0,1], math.pi/6))
# objcm_list = robot_instance2.get_hold_objlist()
# robot_instance2.release(objcm_list[-1], jawwidth=.082)
# robot_meshmodel = robot_instance2.gen_meshmodel()
# robot_meshmodel.attach_to(base)
# robot_instance2.show_cdprimit()
base.run()
| [
"wanweiwei07@gmail.com"
] | wanweiwei07@gmail.com |
a1e0d8b350d7ea0d8ccb0adaee667c82278dcfde | 3a02bff6397eb23afd55cc17faf81c24a8751f2d | /fsoft/Week 1/B2/bai20.py | 78996fb509a0e64714b00daeed6da53583635b3e | [] | no_license | cothuyanninh/Python_Code | 909fd4d798cbd856e8993f9d4fea55b4b7c97a1f | 7f657db61845cf8c06725a2da067df526e696b93 | refs/heads/master | 2022-11-06T01:00:39.939194 | 2019-01-13T15:27:38 | 2019-01-13T15:27:38 | 164,468,626 | 0 | 1 | null | 2022-10-13T16:16:21 | 2019-01-07T17:40:51 | Python | UTF-8 | Python | false | false | 225 | py | ip_src = input("Type: ").split(".")
ip_new_list = [int(i) for i in ip_src]
print(ip_new_list)
# print("."join(str(i) for i in ip_new_list))
result = ""
for i in ip_new_list:
result += str(i)
result += "."
print(result[:-1]) | [
"cothuyanninh@gmail.com"
] | cothuyanninh@gmail.com |
7a3099b7410237ec975d64598d9a19e6bbc19740 | e0760295cc8221dff41af7e98fb49dd77a8fca1e | /test_product_of_array_except_self.py | b3526de15d4571ac8cb2f5f5b3b01d1af6372d91 | [
"MIT"
] | permissive | jaebradley/leetcode.py | 422dd89749482fd9e98530ca1141737a6cdbfca4 | b37b14f49b4b6ee9304a3956b3b52f30d22fac29 | refs/heads/master | 2023-01-24T08:32:11.954951 | 2023-01-18T13:21:56 | 2023-01-18T13:21:56 | 177,721,059 | 1 | 0 | MIT | 2021-07-23T03:52:32 | 2019-03-26T05:32:18 | Python | UTF-8 | Python | false | false | 358 | py | from unittest import TestCase
from product_of_array_except_self import Solution
class TestProductOfArrayExceptSelf(TestCase):
    """Unit tests for Solution.productExceptSelf."""

    def test_values(self):
        # Each entry is the product of all other entries.
        actual = Solution().productExceptSelf([1, 2, 3, 4])
        self.assertEqual(actual, [24, 12, 8, 6])

    def test_values_2(self):
        actual = Solution().productExceptSelf([2, 3, 4, 5])
        self.assertEqual(actual, [60, 40, 30, 24])
| [
"jae.b.bradley@gmail.com"
] | jae.b.bradley@gmail.com |
613a1e04c91353377a76461f50ec234ef3598ccd | 472578974401c83509d81ea4d832fc3fd821f295 | /python资料/day8.7/day06/exercise03.py | 61083cd262387074b83c85661c8b524df53d0abd | [
"MIT"
] | permissive | why1679158278/python-stu | f038ec89e9c3c7cc80dc0ff83b76e7c3078e279e | 0d95451f17e1d583d460b3698047dbe1a6910703 | refs/heads/master | 2023-01-05T04:34:56.128363 | 2020-11-06T09:05:16 | 2020-11-06T09:05:16 | 298,263,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | # 使用列表推导式
# Numbers in 1..50 that are divisible by 3 or by 5, via a list comprehension.
list_result = [n for n in range(1, 51) if n % 3 == 0 or n % 5 == 0]
print(list_result)
# Squares of every number in 5..100.
list_result = [n * n for n in range(5, 101)]
print(list_result)
| [
"1679158278@qq.com"
] | 1679158278@qq.com |
09094452c5150588bd91de1f510c8944c0d702c4 | dfd1dba6fa990810b9609bd25a433c974ab2098d | /backend/purple_cloud_1/urls.py | c766a76997628b9a2824319a8a1a930b4daed168 | [] | no_license | saaaab1213/purple-cloud-1 | fbc54e73e2ba5bf6ed1db3acc9d2357031985343 | 10651eaf6ec13a656ed456ab9bfe504691e1d49d | refs/heads/master | 2023-02-08T02:46:46.009493 | 2021-01-05T09:54:22 | 2021-01-05T09:54:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,987 | py | """purple_cloud_1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Route table: app URLConfs plus auth (allauth / rest_auth) and the admin site.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding text for the Django admin site.
admin.site.site_header = "Purple Cloud"
admin.site.site_title = "Purple Cloud Admin Portal"
admin.site.index_title = "Purple Cloud Admin"
# swagger
# drf-yasg schema view; the generated API docs require an authenticated user.
api_info = openapi.Info(
    title="Purple Cloud API",
    default_version="v1",
    description="API documentation for Purple Cloud App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"lorence@crowdbotics.com"
] | lorence@crowdbotics.com |
132ec6ef5bb76e2e07bc3e2a9960380a2a326fb4 | 88064e96a4ce3aaa472222c8f294799655e923b8 | /lesson07/exercise4.py | b8277aa2405a2c1a73075dafeb8f5860d524cbe7 | [] | no_license | manosxatz/python | 52d91b42dcd3e516296811a036f23a988542008d | 789f2306f7c6c1ad798c228000bb0d49e99c9629 | refs/heads/master | 2022-11-09T18:46:54.002464 | 2020-06-24T16:57:19 | 2020-06-24T16:57:19 | 275,238,123 | 1 | 0 | null | 2020-06-26T20:05:26 | 2020-06-26T20:05:26 | null | UTF-8 | Python | false | false | 929 | py | from random import seed
from random import randrange
from datetime import datetime # all 3 at the beginning
# Seed once at startup. Passing a datetime object directly stopped working in
# Python 3.11 (random.seed raises TypeError for unsupported types); a float
# timestamp preserves the original "seed from the clock" intent.
seed(datetime.now().timestamp())
N = 30
# Roster of N uniquely named pupils.
pupils = set()
for number in range(N):
    pupils.add("pupil" + str(number))


def _random_pairs(roster):
    """Randomly partition *roster* into disjoint 2-pupil teams (tuples)."""
    pool = list(roster)
    pairs = set()
    for _ in range(len(pool) // 2):
        first = pool.pop(randrange(0, len(pool)))
        second = pool.pop(randrange(0, len(pool)))
        pairs.add((first, second))
    return pairs


# The two subjects previously duplicated the same pairing loop verbatim.
math_teams = _random_pairs(pupils)
print("Math teams: " + str(math_teams))
geography_teams = _random_pairs(pupils)
print("Geography teams: " + str(geography_teams))
"noreply@github.com"
] | manosxatz.noreply@github.com |
f1b5ec65d65f614da16fa1d29140d2ba61bc3f54 | 6527b66fd08d9e7f833973adf421faccd8b765f5 | /yuancloud/addons/report_webkit/__yuancloud__.py | 4dff514cfa8d054469a394cd8baa16fae6ecef04 | [] | no_license | cash2one/yuancloud | 9a41933514e57167afb70cb5daba7f352673fb4d | 5a4fd72991c846d5cb7c5082f6bdfef5b2bca572 | refs/heads/master | 2021-06-19T22:11:08.260079 | 2017-06-29T06:26:15 | 2017-06-29T06:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,700 | py | # -*- coding: utf-8 -*-
# Part of YuanCloud. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# Author : Nicolas Bessi (Camptocamp)
{
'name': 'Webkit Report Engine',
'description': """
This module adds a new Report Engine based on WebKit library (wkhtmltopdf) to support reports designed in HTML + CSS.
=====================================================================================================================
The module structure and some code is inspired by the report_openoffice module.
The module allows:
------------------
- HTML report definition
- Multi header support
- Multi logo
- Multi company support
- HTML and CSS-3 support (In the limit of the actual WebKIT version)
- JavaScript support
- Raw HTML debugger
- Book printing capabilities
- Margins definition
- Paper size definition
Multiple headers and logos can be defined per company. CSS style, header and
footer body are defined per company.
For a sample report see also the webkit_report_sample module, and this video:
http://files.me.com/nbessi/06n92k.mov
Requirements and Installation:
------------------------------
This module requires the ``wkhtmltopdf`` library to render HTML documents as
PDF. Version 0.9.9 or later is necessary, and can be found at
http://code.google.com/p/wkhtmltopdf/ for Linux, Mac OS X (i386) and Windows (32bits).
After installing the library on the YuanCloud Server machine, you may need to set
the path to the ``wkhtmltopdf`` executable file in a system parameter named
``webkit_path`` in Settings -> Customization -> Parameters -> System Parameters
If you are experiencing missing header/footer problems on Linux, be sure to
install a 'static' version of the library. The default ``wkhtmltopdf`` on
Ubuntu is known to have this issue.
TODO:
-----
* JavaScript support activation deactivation
* Collated and book format support
* Zip return for separated PDF
* Web client WYSIWYG
""",
'version': '0.9',
'depends': ['base','report'],
'author': '北京山水物源科技有限公司',
'category' : 'Tools', # i.e a technical module, not shown in Application install menu
'url': 'http://www.yuancloud.cn/page/tools',
'data': [ 'security/ir.model.access.csv',
'data.xml',
'wizard/report_webkit_actions_view.xml',
'company_view.xml',
'header_view.xml',
'ir_report_view.xml',
],
'demo': [
"report/webkit_report_demo.xml",
],
'test': [
"test/print.yml",
],
'installable': True,
'auto_install': False,
}
| [
"liuganghao@lztogether.com"
] | liuganghao@lztogether.com |
d216baf5afc15e5e4d3d49a521a2e2b3a26e18d2 | 9dde5311a5fe0357995a737eb8bc9b54a5cc21d8 | /betago/processor.py | 1fee7dd7acbbda0d32df2d67e948e987fa3f981f | [
"MIT"
] | permissive | maxpumperla/betago | 08163cbc5a61d4c5e19fd59adc4299a19427ec90 | ff06b467e16d7a7a22555d14181b723d853e1a70 | refs/heads/master | 2023-08-21T14:48:53.854515 | 2020-12-22T08:33:54 | 2020-12-22T08:33:54 | 56,266,535 | 747 | 192 | MIT | 2019-11-18T09:45:36 | 2016-04-14T20:02:28 | Python | UTF-8 | Python | false | false | 7,497 | py | from __future__ import absolute_import
import numpy as np
from .dataloader.base_processor import GoDataProcessor, GoFileProcessor
from six.moves import range
class SevenPlaneProcessor(GoDataProcessor):
    '''
    Implementation of a Go data processor, using seven planes of 19x19 values to represent the position of
    a go board, as explained below.

    This closely reflects the representation suggested in Clark, Storkey:
    http://arxiv.org/abs/1412.3409
    '''
    def __init__(self, data_directory='data', num_planes=7, consolidate=True, use_generator=False):
        super(SevenPlaneProcessor, self).__init__(data_directory=data_directory,
                                                  num_planes=num_planes,
                                                  consolidate=consolidate,
                                                  use_generator=use_generator)

    def feature_and_label(self, color, move, go_board, num_planes):
        '''
        Encode the current position as a binary feature tensor and the chosen
        move as a flat board-index label.

        Parameters
        ----------
        color: color of the next person to move
        move: (row, col) move they decided to make
        go_board: represents the state of the board before they moved
        num_planes: number of feature planes (seven for this processor)

        Planes we write:
        0: our stones with 1 liberty
        1: our stones with 2 liberties
        2: our stones with 3 or more liberties
        3: their stones with 1 liberty
        4: their stones with 2 liberties
        5: their stones with 3 or more liberties
        6: simple ko
        '''
        row, col = move
        enemy_color = go_board.other_color(color)
        # Was hard-coded to 19; board_size keeps the label consistent for any
        # board size (identical result on the standard 19x19 board).
        label = row * go_board.board_size + col
        move_array = np.zeros((num_planes, go_board.board_size, go_board.board_size))
        for row in range(0, go_board.board_size):
            for col in range(0, go_board.board_size):
                pos = (row, col)
                stone = go_board.board.get(pos)
                if stone == color or stone == enemy_color:
                    # Planes 0/1/2 encode 1/2/3+ liberties for our stones;
                    # enemy stones use the same scheme offset by 3.
                    liberties = go_board.go_strings[pos].liberties.size()
                    plane = min(liberties, 3) - 1
                    if plane >= 0:
                        if stone == enemy_color:
                            plane += 3
                        move_array[plane, row, col] = 1
                if go_board.is_simple_ko(color, pos):
                    move_array[6, row, col] = 1
        return move_array, label
class ThreePlaneProcessor(GoDataProcessor):
    '''
    Simpler version of the above processor using just three planes. This data processor uses one plane for
    stone positions of each color and one for ko.
    '''
    def __init__(self, data_directory='data', num_planes=3, consolidate=True, use_generator=False):
        super(ThreePlaneProcessor, self).__init__(data_directory=data_directory,
                                                  num_planes=num_planes,
                                                  consolidate=consolidate,
                                                  use_generator=use_generator)

    def feature_and_label(self, color, move, go_board, num_planes):
        '''
        Encode the current position as a binary feature tensor and the chosen
        move as a flat board-index label.

        Parameters
        ----------
        color: color of the next person to move
        move: (row, col) move they decided to make
        go_board: represents the state of the board before they moved
        num_planes: number of feature planes (three for this processor)

        Planes we write:
        0: our stones
        1: their stones
        2: ko
        '''
        row, col = move
        enemy_color = go_board.other_color(color)
        # Was hard-coded to 19; use the actual board size (same value on 19x19).
        label = row * go_board.board_size + col
        move_array = np.zeros((num_planes, go_board.board_size, go_board.board_size))
        for row in range(0, go_board.board_size):
            for col in range(0, go_board.board_size):
                pos = (row, col)
                stone = go_board.board.get(pos)
                # A point holds at most one color, so the checks are exclusive.
                if stone == color:
                    move_array[0, row, col] = 1
                elif stone == enemy_color:
                    move_array[1, row, col] = 1
                if go_board.is_simple_ko(color, pos):
                    move_array[2, row, col] = 1
        return move_array, label
class SevenPlaneFileProcessor(GoFileProcessor):
    '''
    File processor corresponding to the above data processor. Loading all available data into memory is simply
    not feasible, and this class allows preprocessing into an efficient, binary format.
    '''
    def __init__(self, data_directory='data', num_planes=7, consolidate=True):
        super(SevenPlaneFileProcessor, self).__init__(data_directory=data_directory,
                                                      num_planes=num_planes, consolidate=consolidate)
    def store_results(self, data_file, color, move, go_board):
        '''
        Parameters
        ----------
        color: color of the next person to move
        move: move they decided to make
        go_board: represents the state of the board before they moved
        Planes we write:
        0: our stones with 1 liberty
        1: our stones with 2 liberty
        2: our stones with 3 or more liberties
        3: their stones with 1 liberty
        4: their stones with 2 liberty
        5: their stones with 3 or more liberties
        6: simple ko
        '''
        row, col = move
        enemy_color = go_board.other_color(color)
        # Record header: 'GO' magic marker, then a 2-byte little-endian label
        # (low byte first) followed by two zero padding bytes.
        data_file.write('GO')
        # NOTE(review): the label assumes a 19x19 board even though the loops
        # below use go_board.board_size -- confirm before other board sizes.
        label = row * 19 + col
        data_file.write(chr(label % 256))
        data_file.write(chr(label // 256))
        data_file.write(chr(0))
        data_file.write(chr(0))
        # Bit-pack the 7 binary feature planes, most significant bit first
        # (thisbit << (7 - thisbitpos)), flushing each completed byte.
        thisbyte = 0
        thisbitpos = 0
        for plane in range(0, 7):
            for row in range(0, go_board.board_size):
                for col in range(0, go_board.board_size):
                    thisbit = 0
                    pos = (row, col)
                    if go_board.board.get(pos) == color:
                        if plane == 0 and go_board.go_strings[pos].liberties.size() == 1:
                            thisbit = 1
                        elif plane == 1 and go_board.go_strings[pos].liberties.size() == 2:
                            thisbit = 1
                        elif plane == 2 and go_board.go_strings[pos].liberties.size() >= 3:
                            thisbit = 1
                    if go_board.board.get(pos) == enemy_color:
                        if plane == 3 and go_board.go_strings[pos].liberties.size() == 1:
                            thisbit = 1
                        elif plane == 4 and go_board.go_strings[pos].liberties.size() == 2:
                            thisbit = 1
                        elif plane == 5 and go_board.go_strings[pos].liberties.size() >= 3:
                            thisbit = 1
                    if plane == 6 and go_board.is_simple_ko(color, pos):
                        thisbit = 1
                    thisbyte = thisbyte + (thisbit << (7 - thisbitpos))
                    thisbitpos = thisbitpos + 1
                    if thisbitpos == 8:
                        data_file.write(chr(thisbyte))
                        thisbitpos = 0
                        thisbyte = 0
        # Flush any trailing partial byte so no bits are lost.
        if thisbitpos != 0:
            data_file.write(chr(thisbyte))
| [
"max.pumperla@googlemail.com"
] | max.pumperla@googlemail.com |
e42f78d219dc229a17fb5870bdb01f72df7e0996 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03105/s666582429.py | ab016687c44a5c29f7baead18b3f59c2b7730337 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | #from statistics import median
#import collections
#aa = collections.Counter(a) # list to list || .most_common(2)で最大の2個とりだせるお a[0][0]
from fractions import gcd
from itertools import combinations,permutations,accumulate, product # (string,3) 3回
#from collections import deque
from collections import deque,defaultdict,Counter
import decimal
import re
#import bisect
#
# d = m - k[i] - k[j]
# if kk[bisect.bisect_right(kk,d) - 1] == d:
#
#
#
# pythonで無理なときは、pypyでやると正解するかも!!
#
#
# my_round_int = lambda x:np.round((x*2 + 1)//2)
# 四捨五入g
import sys
# Generous recursion limit and the usual contest modulus from the template.
sys.setrecursionlimit(10000000)
mod = 10 ** 9 + 7
# mod = 9982443453


def readInts():
    """Read one line of whitespace-separated integers from stdin."""
    return [int(token) for token in input().split()]


def I():
    """Read a single integer from stdin."""
    return int(input())


# Answer is the smaller of c and b // a.
a, b, c = readInts()
print(min(c, b // a))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
50de8f8f99b8bc3466cf24ce85afa5ad4e51cfdb | e232de1f42a922dc0c94d889c1f72d4f66b325d6 | /genfiles/proto/rec/gui/proto/Resizer_pb2.py | 065e69fbf5842f8fa39226a86cfa8d2e31a3ed35 | [] | no_license | rec/slow_gold | b19fcd684e469978bf20cd0638fa83786fc5ffae | f4551785cf7f9cf45605a850d013eef5d80f4ea6 | refs/heads/master | 2022-10-20T06:37:00.370927 | 2017-02-04T11:39:59 | 2017-02-04T11:39:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 1,563 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: rec/gui/proto/Resizer.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = _descriptor.FileDescriptor(
name='rec/gui/proto/Resizer.proto',
package='rec.gui',
serialized_pb='\n\x1brec/gui/proto/Resizer.proto\x12\x07rec.gui\"!\n\x0cResizerProto\x12\x11\n\tmin_value\x18\x01 \x01(\t')
_RESIZERPROTO = _descriptor.Descriptor(
name='ResizerProto',
full_name='rec.gui.ResizerProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_value', full_name='rec.gui.ResizerProto.min_value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=40,
serialized_end=73,
)
DESCRIPTOR.message_types_by_name['ResizerProto'] = _RESIZERPROTO
class ResizerProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _RESIZERPROTO
# @@protoc_insertion_point(class_scope:rec.gui.ResizerProto)
# @@protoc_insertion_point(module_scope)
| [
"tom@swirly.com"
] | tom@swirly.com |
4203d1a854a8dd37fdb9eda8a1e7cac35ecb4cee | b162de01d1ca9a8a2a720e877961a3c85c9a1c1c | /165.compare-version-numbers.python3.py | 87a07d78743ccbd88f763f85f7771909751a5969 | [] | no_license | richnakasato/lc | 91d5ff40a1a3970856c76c1a53d7b21d88a3429c | f55a2decefcf075914ead4d9649d514209d17a34 | refs/heads/master | 2023-01-19T09:55:08.040324 | 2020-11-19T03:13:51 | 2020-11-19T03:13:51 | 114,937,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | #
# [165] Compare Version Numbers
#
# https://leetcode.com/problems/compare-version-numbers/description/
#
# algorithms
# Medium (22.00%)
# Total Accepted: 118.9K
# Total Submissions: 540.6K
# Testcase Example: '"0.1"\n"1.1"'
#
# Compare two version numbers version1 and version2.
# If version1 > version2 return 1; if version1 < version2 return -1;otherwise
# return 0.
#
# You may assume that the version strings are non-empty and contain only digits
# and the . character.
# The . character does not represent a decimal point and is used to separate
# number sequences.
# For instance, 2.5 is not "two and a half" or "half way to version three", it
# is the fifth second-level revision of the second first-level revision.
#
# Example 1:
#
#
# Input: version1 = "0.1", version2 = "1.1"
# Output: -1
#
# Example 2:
#
#
# Input: version1 = "1.0.1", version2 = "1"
# Output: 1
#
# Example 3:
#
#
# Input: version1 = "7.5.2.4", version2 = "7.5.3"
# Output: -1
#
#
class Solution:
    def compareVersion(self, version1, version2):
        """
        Compare two dotted revision strings: return 1 if version1 > version2,
        -1 if version1 < version2, and 0 when they are equal.

        :type version1: str
        :type version2: str
        :rtype: int
        """
        parts1 = [int(p) for p in version1.split('.')]
        parts2 = [int(p) for p in version2.split('.')]
        # Pad the shorter revision list with zeros so "1.0" equals "1".
        length = max(len(parts1), len(parts2))
        parts1 += [0] * (length - len(parts1))
        parts2 += [0] * (length - len(parts2))
        for a, b in zip(parts1, parts2):
            if a != b:
                return 1 if a > b else -1
        return 0
| [
"richnakasato@hotmail.com"
] | richnakasato@hotmail.com |
a4a2d2dbd4a2b79ce6744d1e9304e8b3b5400cee | 1bed2f766620acf085ed2d7fd3e354a3482b8960 | /tests/components/roku/test_select.py | 003487c0adfdf8e753fab56230179045e1c451c3 | [
"Apache-2.0"
] | permissive | elupus/home-assistant | 5cbb79a2f25a2938a69f3988534486c269b77643 | 564150169bfc69efdfeda25a99d803441f3a4b10 | refs/heads/dev | 2023-08-28T16:36:04.304864 | 2022-09-16T06:35:12 | 2022-09-16T06:35:12 | 114,460,522 | 2 | 2 | Apache-2.0 | 2023-02-22T06:14:54 | 2017-12-16T12:50:55 | Python | UTF-8 | Python | false | false | 7,752 | py | """Tests for the Roku select platform."""
from unittest.mock import MagicMock
import pytest
from rokuecp import (
Application,
Device as RokuDevice,
RokuConnectionError,
RokuConnectionTimeoutError,
RokuError,
)
from homeassistant.components.roku.const import DOMAIN
from homeassistant.components.roku.coordinator import SCAN_INTERVAL
from homeassistant.components.select import DOMAIN as SELECT_DOMAIN
from homeassistant.components.select.const import ATTR_OPTION, ATTR_OPTIONS
from homeassistant.const import ATTR_ENTITY_ID, ATTR_ICON, SERVICE_SELECT_OPTION
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_registry as er
import homeassistant.util.dt as dt_util
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_application_state(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_device: RokuDevice,
    mock_roku: MagicMock,
) -> None:
    """Test the creation and values of the Roku selects."""
    entity_registry = er.async_get(hass)
    # Pre-register the select so it exists (enabled) before the entry loads.
    entity_registry.async_get_or_create(
        SELECT_DOMAIN,
        DOMAIN,
        "1GU48T017973_application",
        suggested_object_id="my_roku_3_application",
        disabled_by=None,
    )
    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    # Options reflect the mocked device's application list (mock_device.apps
    # is indexed further down).
    state = hass.states.get("select.my_roku_3_application")
    assert state
    assert state.attributes.get(ATTR_ICON) == "mdi:application"
    assert state.attributes.get(ATTR_OPTIONS) == [
        "Home",
        "Amazon Video on Demand",
        "Free FrameChannel Service",
        "MLB.TV" + "\u00AE",
        "Mediafly",
        "Netflix",
        "Pandora",
        "Pluto TV - It's Free TV",
        "Roku Channel Store",
    ]
    assert state.state == "Home"
    entry = entity_registry.async_get("select.my_roku_3_application")
    assert entry
    assert entry.unique_id == "1GU48T017973_application"
    # Selecting an app by name must launch it by app id ("12" here).
    await hass.services.async_call(
        SELECT_DOMAIN,
        SERVICE_SELECT_OPTION,
        {
            ATTR_ENTITY_ID: "select.my_roku_3_application",
            ATTR_OPTION: "Netflix",
        },
        blocking=True,
    )
    assert mock_roku.launch.call_count == 1
    mock_roku.launch.assert_called_with("12")
    # Simulate the device reporting the new app, then advance past the poll.
    mock_device.app = mock_device.apps[1]
    async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
    await hass.async_block_till_done()
    state = hass.states.get("select.my_roku_3_application")
    assert state
    assert state.state == "Netflix"
    # "Home" is handled via the remote's home keypress rather than a launch.
    await hass.services.async_call(
        SELECT_DOMAIN,
        SERVICE_SELECT_OPTION,
        {
            ATTR_ENTITY_ID: "select.my_roku_3_application",
            ATTR_OPTION: "Home",
        },
        blocking=True,
    )
    assert mock_roku.remote.call_count == 1
    mock_roku.remote.assert_called_with("home")
    mock_device.app = Application(
        app_id=None, name="Roku", version=None, screensaver=None
    )
    async_fire_time_changed(hass, dt_util.utcnow() + (SCAN_INTERVAL * 2))
    await hass.async_block_till_done()
    state = hass.states.get("select.my_roku_3_application")
    assert state
    assert state.state == "Home"
@pytest.mark.parametrize(
    "error, error_string",
    [
        (RokuConnectionError, "Error communicating with Roku API"),
        (RokuConnectionTimeoutError, "Timeout communicating with Roku API"),
        (RokuError, "Invalid response from Roku API"),
    ],
)
async def test_application_select_error(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_roku: MagicMock,
    error: RokuError,
    error_string: str,
) -> None:
    """Test error handling of the Roku selects."""
    entity_registry = er.async_get(hass)
    entity_registry.async_get_or_create(
        SELECT_DOMAIN,
        DOMAIN,
        "1GU48T017973_application",
        suggested_object_id="my_roku_3_application",
        disabled_by=None,
    )
    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    # Make the launch call raise each parametrized rokuecp error; the service
    # call must surface a HomeAssistantError with the matching message.
    mock_roku.launch.side_effect = error
    with pytest.raises(HomeAssistantError, match=error_string):
        await hass.services.async_call(
            SELECT_DOMAIN,
            SERVICE_SELECT_OPTION,
            {
                ATTR_ENTITY_ID: "select.my_roku_3_application",
                ATTR_OPTION: "Netflix",
            },
            blocking=True,
        )
    # State stays unchanged and exactly one launch attempt was made.
    state = hass.states.get("select.my_roku_3_application")
    assert state
    assert state.state == "Home"
    assert mock_roku.launch.call_count == 1
    mock_roku.launch.assert_called_with("12")
@pytest.mark.parametrize("mock_device", ["roku/rokutv-7820x.json"], indirect=True)
async def test_channel_state(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    mock_device: RokuDevice,
    mock_roku: MagicMock,
) -> None:
    """Test the creation and values of the Roku selects."""
    entity_registry = er.async_get(hass)
    state = hass.states.get("select.58_onn_roku_tv_channel")
    assert state
    assert state.attributes.get(ATTR_ICON) == "mdi:television"
    assert state.attributes.get(ATTR_OPTIONS) == [
        "99.1",
        "QVC (1.3)",
        "WhatsOn (1.1)",
        "getTV (14.3)",
    ]
    assert state.state == "getTV (14.3)"
    entry = entity_registry.async_get("select.58_onn_roku_tv_channel")
    assert entry
    assert entry.unique_id == "YN00H5555555_channel"
    # channel name
    # Selecting by display name must tune to the underlying channel number.
    await hass.services.async_call(
        SELECT_DOMAIN,
        SERVICE_SELECT_OPTION,
        {
            ATTR_ENTITY_ID: "select.58_onn_roku_tv_channel",
            ATTR_OPTION: "WhatsOn (1.1)",
        },
        blocking=True,
    )
    assert mock_roku.tune.call_count == 1
    mock_roku.tune.assert_called_with("1.1")
    # Reflect the tuned channel on the device and advance past the poll.
    mock_device.channel = mock_device.channels[0]
    async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
    await hass.async_block_till_done()
    state = hass.states.get("select.58_onn_roku_tv_channel")
    assert state
    assert state.state == "WhatsOn (1.1)"
    # channel number
    # A bare channel-number option is passed through to tune unchanged.
    await hass.services.async_call(
        SELECT_DOMAIN,
        SERVICE_SELECT_OPTION,
        {
            ATTR_ENTITY_ID: "select.58_onn_roku_tv_channel",
            ATTR_OPTION: "99.1",
        },
        blocking=True,
    )
    assert mock_roku.tune.call_count == 2
    mock_roku.tune.assert_called_with("99.1")
    mock_device.channel = mock_device.channels[3]
    async_fire_time_changed(hass, dt_util.utcnow() + SCAN_INTERVAL)
    await hass.async_block_till_done()
    state = hass.states.get("select.58_onn_roku_tv_channel")
    assert state
    assert state.state == "99.1"
@pytest.mark.parametrize("mock_device", ["roku/rokutv-7820x.json"], indirect=True)
async def test_channel_select_error(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    mock_roku: MagicMock,
) -> None:
    """Test error handling of the Roku selects."""
    # A tune failure must surface as HomeAssistantError without changing state.
    mock_roku.tune.side_effect = RokuError
    with pytest.raises(HomeAssistantError, match="Invalid response from Roku API"):
        await hass.services.async_call(
            SELECT_DOMAIN,
            SERVICE_SELECT_OPTION,
            {
                ATTR_ENTITY_ID: "select.58_onn_roku_tv_channel",
                ATTR_OPTION: "99.1",
            },
            blocking=True,
        )
    state = hass.states.get("select.58_onn_roku_tv_channel")
    assert state
    assert state.state == "getTV (14.3)"
    assert mock_roku.tune.call_count == 1
    mock_roku.tune.assert_called_with("99.1")
| [
"noreply@github.com"
] | elupus.noreply@github.com |
822862f3f180bee1dd9909c9182fcf3e328eaab8 | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /longestObstacleCourseAtEachPosition.py | 0ca0dd3396e0d71e0b4f329dc296a161e89b0e39 | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | from typing import List
class Solution:
    def longestObstacleCourseAtEachPosition(self,
                                            obstacles: List[int]) -> List[int]:
        """For each index i, return the length of the longest non-decreasing
        subsequence of obstacles[0..i] that ends with obstacles[i].

        Patience-sorting / LIS technique: tails[k] holds the smallest possible
        tail of a non-decreasing run of length k + 1 seen so far, so tails
        stays sorted and each step is one binary search (O(n log n) overall).
        Also handles an empty input (returns []), which previously crashed on
        obstacles[0].
        """
        from bisect import bisect_right  # stdlib replaces the hand-rolled search

        tails: List[int] = []
        result: List[int] = []
        for height in obstacles:
            # bisect_right (not bisect_left) lets equal heights extend a run,
            # matching the non-decreasing requirement.
            idx = bisect_right(tails, height)
            if idx == len(tails):
                tails.append(height)
            else:
                tails[idx] = height
            result.append(idx + 1)
        return result
# Quick smoke test: expected output is [1, 2, 3, 3].
obstacles = [1, 2, 3, 2]
print(Solution().longestObstacleCourseAtEachPosition(obstacles))
| [
"zzz136454872@163.com"
] | zzz136454872@163.com |
b7987901a5a75205f490089306580edb93381290 | d579fdffa059724aff5a540e1ca6c12f508fd7b4 | /flex/django/ussd/screens/__init__.py | 4dafb9db0d2438990b3d4bade50a804a1be4d3ba | [
"MIT"
] | permissive | centergy/flex_ussd | 7493afddeea7c142e6ae6ee9f85406e165e65404 | ddc0ccd192e3a0a82e8b7705f088862d59656c28 | refs/heads/master | 2020-03-24T21:21:46.790958 | 2018-09-23T01:07:13 | 2018-09-23T01:07:13 | 143,027,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from .base import UssdScreenType, UssdScreen, UssdPayload, ScreenState, ScreenRef
from .base import get_screen, get_screen_uid, get_home_screen, render_screen
from .base import END, CON
| [
"davidmkyalo@gmail.com"
] | davidmkyalo@gmail.com |
887b1ddeae86361e13fc5082defd58656ea36555 | f71ee969fa331560b6a30538d66a5de207e03364 | /scripts/client/messenger/gui/scaleform/channels/bw_factories.py | 35ded875a0efc378793bae087ca90f289d31b095 | [] | no_license | webiumsk/WOT-0.9.8-CT | 31356ed01cb110e052ba568e18cb2145d4594c34 | aa8426af68d01ee7a66c030172bd12d8ca4d7d96 | refs/heads/master | 2016-08-03T17:54:51.752169 | 2015-05-12T14:26:00 | 2015-05-12T14:26:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,259 | py | # Embedded file name: scripts/client/messenger/gui/Scaleform/channels/bw_factories.py
import chat_shared
from constants import PREBATTLE_TYPE
from debug_utils import LOG_ERROR
from messenger.gui.Scaleform.channels import bw_lobby_controllers
from messenger.gui.Scaleform.channels import bw_battle_controllers
from messenger.gui.interfaces import IControllerFactory
from messenger.m_constants import LAZY_CHANNEL
from messenger.proto.bw import find_criteria
from messenger.storage import storage_getter
class LobbyControllersFactory(IControllerFactory):
    """Builds chat channel controllers for lobby (non-battle) channels."""

    def __init__(self):
        super(LobbyControllersFactory, self).__init__()

    @storage_getter('channels')
    def channelsStorage(self):
        return None

    def init(self):
        """Create a controller for every existing lobby channel.

        Channels for which no controller applies are skipped.
        """
        controllers = []
        channels = self.channelsStorage.getChannelsByCriteria(find_criteria.BWLobbyChannelFindCriteria())
        for channel in channels:
            controller = self.factory(channel)
            if controller is not None:
                controllers.append(controller)
        return controllers

    def factory(self, channel):
        """Map a single channel to the matching lobby controller, or None."""
        controller = None
        if channel.getName() in LAZY_CHANNEL.ALL:
            if channel.getName() == LAZY_CHANNEL.SPECIAL_BATTLES:
                controller = bw_lobby_controllers.BSLazyChannelController(channel)
            else:
                controller = bw_lobby_controllers.LazyChannelController(channel)
        elif channel.isPrebattle():
            prbType = channel.getPrebattleType()
            # `prbType is 0` only worked through CPython's small-int caching;
            # equality is the correct and portable comparison.
            if prbType == 0:
                LOG_ERROR('Prebattle type is not found', channel)
                return None
            if prbType == PREBATTLE_TYPE.TRAINING:
                controller = bw_lobby_controllers.TrainingChannelController(channel)
            else:
                controller = bw_lobby_controllers.PrebattleChannelController(prbType, channel)
        elif not channel.isBattle():
            controller = bw_lobby_controllers.LobbyChannelController(channel)
        return controller
class BattleControllersFactory(IControllerFactory):
    """Builds chat channel controllers for battle channels."""

    @storage_getter('channels')
    def channelsStorage(self):
        return None

    def init(self):
        # Battle channels plus, when present, the squad prebattle channel.
        found = self.channelsStorage.getChannelsByCriteria(find_criteria.BWBattleChannelFindCriteria())
        squad = self.channelsStorage.getChannelByCriteria(find_criteria.BWPrbChannelFindCriteria(PREBATTLE_TYPE.SQUAD))
        if squad is not None:
            found.append(squad)
        created = [self.factory(channel) for channel in found]
        return [controller for controller in created if controller is not None]

    def factory(self, channel):
        # Dispatch on the channel's proto flags; None when nothing applies.
        flags = channel.getProtoData().flags
        if flags & chat_shared.CHAT_CHANNEL_BATTLE != 0:
            if flags & chat_shared.CHAT_CHANNEL_BATTLE_TEAM != 0:
                return bw_battle_controllers.TeamChannelController(channel)
            return bw_battle_controllers.CommonChannelController(channel)
        if flags & chat_shared.CHAT_CHANNEL_SQUAD != 0:
            return bw_battle_controllers.SquadChannelController(channel)
        return None
| [
"info@webium.sk"
] | info@webium.sk |
4b7fa0b89034a3c73e7c53685b6db1d8756161b3 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/13/21/1.py | 3c669ac25e4a3e7827b4a913749aefc3eec998b5 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | def solvable(A, motes, adds):
for m in motes:
while m >= A and adds > 0:
A += A - 1
adds -= 1
if m >= A:
return False
A += m
return True
def solve_case(test_case):
    # Python 2 script (raw_input / xrange / print statement).
    # A: our starting mote size, N: number of other motes on the field.
    A, N = map(int, raw_input().split())
    # Ascending order so slicing off the tail removes the largest motes.
    motes = sorted(map(int, raw_input().split()))
    best = 1000000000
    # Brute force every mix of `adds` (insert size A-1 motes, see solvable)
    # and `removes` (drop the largest motes); keep the cheapest that works.
    for adds in xrange(N + 1):
        for removes in xrange(N + 1):
            if solvable(A, motes[:N - removes], adds):
                best = min(best, adds + removes)
    print "Case #{0}: {1}".format(test_case, best)
for test_case in xrange(1, int(raw_input()) + 1):
solve_case(test_case) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
d35cc85e0174d8db3ca2cc290807848677c9a764 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_325/ch21_2020_03_02_20_41_04_179689.py | 7debae918cd7832c89f016bfcd18e1ac314c922c | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | x = int(input("quantos dias "))
y = int(input("quantas horas "))
z = int(input("quantos minutos "))
w = int(input("quantos segundos "))
def soma(dias,horas,minutos,segundos):
d = 86400*dias
h = 3600*horas
m = 60*minutos
s = segundos
c = d+h+m+s
return c | [
"you@example.com"
] | you@example.com |
a05fd43c86e27b01c3f109a88d01540a3b937ad3 | e52765058c96b2123aacf690204d8583a90ad145 | /book_figures/chapter1/fig_dr7_quasar.py | d8d68ebc0777cb56647e6aefdfbe601cda1ee2ea | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | astrofanlee/astroML | dcecb1dbc40dcf8c47deeb11230cff62a451e3c9 | acb02f4385049e7a09ee5eab6f1686e7f119b066 | refs/heads/master | 2020-12-29T03:30:56.700274 | 2013-03-04T16:49:40 | 2013-03-04T16:49:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | """
SDSS DR7 Quasars
----------------
This example shows how to fetch the SDSS quasar photometric data, and to
plot the relationship between redshift and color.
"""
# Author: Jake VanderPlas <vanderplas@astro.washington.edu>
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
from matplotlib import pyplot as plt
from astroML.datasets import fetch_dr7_quasar
#------------------------------------------------------------
# Fetch the quasar data
data = fetch_dr7_quasar()
# select the first 10000 points
data = data[:10000]
r = data['mag_r']
i = data['mag_i']
z = data['redshift']
#------------------------------------------------------------
# Plot the quasar data
ax = plt.axes()
ax.plot(z, r - i, marker='.', markersize=4, linestyle='none', color='black')
ax.set_xlim(0, 5)
ax.set_ylim(-0.5, 1.0)
ax.set_xlabel('redshift')
ax.set_ylabel('r-i')
plt.show()
| [
"vanderplas@astro.washington.edu"
] | vanderplas@astro.washington.edu |
fd496e277fdefb2a2c9b541c99a63156f780d0f2 | 67640d102dbf68c635fdcbac4ae16e91b327e684 | /demo01_test/test_cases/__init__.py | eea3614887f048b7f90597b92be325518e4f6032 | [] | no_license | B9527/unittest_demo | f2ba28fdda309731f0a925732a06ea9824ec03ce | d93846d3497c8bad66f34796b96d014093cd60b7 | refs/heads/master | 2021-01-21T17:10:35.242416 | 2017-07-28T02:42:13 | 2017-07-28T02:42:13 | 98,510,977 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
#----------------------------------------------
# Copyright (python)
# FileName: __init__.py
# Version: 0.0.2
# Author : baiyang
# LastChange: 2017/7/27 15:19
# Desc:
# History:
#--------------------------------------------
""" | [
"1335239218@qq.com"
] | 1335239218@qq.com |
3e83a4254bd9cd516265553423831b535c48d256 | d62028695590b6c52cbc71af1dde6e7fdecb3f9b | /FALCON/src/utils/default_param.py | 7314c5af350c0464df65e30c7b8c8930b80db20d | [
"Apache-2.0"
] | permissive | zijiantang168/Reproducability-FALCON | 3cf0ed46ddf9df414fcf1baf5adb0a9c42111273 | eef9d8d72ae3b763d6a88107b90db9533afedd9e | refs/heads/master | 2023-04-20T11:07:40.587829 | 2021-05-13T14:04:32 | 2021-05-13T14:04:32 | 367,063,997 | 0 | 0 | Apache-2.0 | 2021-05-13T13:52:37 | 2021-05-13T13:52:37 | null | UTF-8 | Python | false | false | 5,163 | py | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
FALCON: FAst and Lightweight CONvolution
File: utils/default_param.py
- Contain source code for receiving arguments .
Version: 1.0
"""
import argparse
def get_default_param():
"""
Receive arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-train", "--is_train",
help="whether train_test the model (train_test-True; test-False)",
action="store_true")
parser.add_argument("-bs", "--batch_size",
help="batch size of training",
type=int,
default=128)
parser.add_argument("-ep", "--epochs",
help="epochs of training",
type=int,
default=350)
parser.add_argument("-lr", "--learning_rate",
help="set beginning learning rate",
type=float,
default=0.01)
parser.add_argument("-op", "--optimizer",
help="choose optimizer",
choices=["SGD", "Adagrad", "Adam", "RMSprop"],
type=str,
default='SGD')
parser.add_argument("-conv", "--convolution",
help="choose convolution",
choices=["StandardConv",
"FALCON",
"RankFALCON",
"StConvBranch",
"FALCONBranch"],
type=str,
default="StandardConv")
parser.add_argument("-k", "--rank",
help="if the model is Rank K, the rank(k) in range {1,2,3}",
choices=[1, 2, 3, 4],
type=int,
default=1)
parser.add_argument("-al", "--alpha",
help="Width Multiplier in range (0,1]",
# choices=[1, 0.75, 0.5, 0.33, 0.25],
type=float,
default=1)
parser.add_argument("-m", "--model",
help="model type - VGG16/VGG19/ResNet",
choices=['VGG16', 'VGG19', 'ResNet'],
type=str,
default='VGG19')
parser.add_argument("-data", "--datasets",
help="specify datasets - cifar10/cifar100/svhn/mnist/tinyimagenet/imagenet",
choices=['cifar10', 'cifar100', 'svhn', 'mnist', 'tinyimagenet', 'imagenet'],
type=str,
default='cifar100')
parser.add_argument("-ns", "--not_save",
help="do not save the model",
action="store_true")
parser.add_argument("-b", "--beta",
help="balance between classification loss and transfer loss",
type=float,
default=0.0)
parser.add_argument('-bn', '--bn',
action='store_true',
help='add batch_normalization after FALCON')
parser.add_argument('-relu', '--relu',
action='store_true',
help='add relu function after FALCON')
parser.add_argument("-lrd", "--lr_decay_rate",
help="learning rate dacay rate",
type=int,
default=10)
parser.add_argument("-exp", "--expansion",
help="expansion ration in MobileConvV2",
type=float,
default=6.0)
parser.add_argument('-init', '--init',
action='store_true',
help='Whether initialize FALCON')
parser.add_argument("-g", "--groups",
help="groups number for pointwise convolution",
type=int,
default=1)
parser.add_argument("--stconv_path",
help="restore StConv model from the path",
type=str,
default='')
parser.add_argument("--restore_path",
help="restore model from the path",
type=str,
default='')
return parser
| [
"none@none.com"
] | none@none.com |
09906cf7bda1b80f0d3b583f2413572b3f3fcd22 | ab19b1e637109f6a6f32e99714ea1c7cbe1d5ec0 | /articles/migrations/0019_article_publish_after.py | a9d38592c2a964ef44a79aeec0ecba6e0d1db057 | [] | no_license | devonwarren/totemag | daf05876cfe636c4dcfe83b764900a0bc4c9c29d | 304ab0e2f72b926e63de706a6e3dc0b043db36fd | refs/heads/master | 2021-01-17T20:48:48.671352 | 2016-06-02T00:57:11 | 2016-06-02T00:57:11 | 58,146,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-18 22:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0018_auto_20151124_2026'),
]
operations = [
migrations.AddField(
model_name='article',
name='publish_after',
field=models.DateTimeField(blank=True, help_text='If set will be published after this point automatically', null=True),
),
]
| [
"devon.warren@gmail.com"
] | devon.warren@gmail.com |
f993e3b5fb115450f780e5412a64f240b4667778 | 9e7d7b4d029554eed0f760a027cd94558b919ae2 | /chapter2/continue_break_statements.py | e9af6c8a148f493882b7cecd565d9844fcdfc65c | [] | no_license | pooja1506/AutomateTheBoringStuff_2e | 8247b68a195d5e1976c6474f0e97d947906ffd35 | 5bab9ccdcdb22ee10fe1272c91042be40fd67c17 | refs/heads/master | 2022-04-10T19:21:44.402829 | 2020-04-05T12:10:32 | 2020-04-05T12:10:32 | 249,620,282 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | while True:
print("who are you?")
name = input()
if name != 'Joe':
continue
print("hello joe , please enter your password")
password = input()
if password == 'Seasword':
break
print("access granted")
#continue statement is used to jump back to the start of the loop to re-evaluate the input until its true
#break statement is used to immediately exit the while loop clause | [
"pooja.dmehta15@gmail.com"
] | pooja.dmehta15@gmail.com |
901b1b56c55239bb850491d05e8b6501220bd9f6 | 7cf119239091001cbe687f73018dc6a58b5b1333 | /datashufflepy-zeus/src/branch_scripts2/NEWS/ZX_CJXW_ZYCJ/ZX_CJXW_ZYCJ_JJW_CJZQ.py | bb53ef4acf7a8bb3b4a7661eb8cf6b64186458af | [
"Apache-2.0"
] | permissive | ILKKAI/dataETL | 0f5b80c3482994f735f092a1e01fa1009bac4109 | 32f7ec3aaaf32b5074536a615cb9cd5c28bd499c | refs/heads/master | 2022-04-04T19:27:05.747852 | 2020-02-28T11:17:48 | 2020-02-28T11:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # -*- coding: utf-8 -*-
from database._mongodb import MongoClient
def data_shuffle(data):
return data
if __name__ == '__main__':
main_mongo = MongoClient(entity_code="ZX_CJXW_ZYCJ_JJW_CJZQ", mongo_collection="ZX_CJXW_ZYCJ")
data_list = main_mongo.main()
for data in data_list:
re_data = data_shuffle(data)
print(re_data)
| [
"499413642@qq.com"
] | 499413642@qq.com |
e6bdd6de0cabea72e54e64e2694a3cd452cc4fa4 | bad6970aa7c929bcd8447106c1f3af6567a42456 | /tests/test_snowflake.py | 70f14b17835adcae044d93f7c7c15fc290185db5 | [
"MIT"
] | permissive | wrodney/simple-ddl-parser | ad498221367bf657ed94106feded5ff8c3fdb46b | 6e4d1d65f74fa2f9266a4f9e28bd8b6e42ddaa14 | refs/heads/main | 2023-08-15T01:26:48.214664 | 2021-09-22T06:20:11 | 2021-09-22T06:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,073 | py | from simple_ddl_parser import DDLParser
def test_clone_db():
ddl = """
create database mytestdb_clone clone mytestdb;
"""
result = DDLParser(ddl).run(group_by_type=True)
expected = {
"databases": [
{"clone": {"from": "mytestdb"}, "database_name": "mytestdb_clone"}
],
"domains": [],
"schemas": [],
"sequences": [],
"tables": [],
"types": [],
}
assert result == expected
def test_clone_table():
expected = {
"domains": [],
"schemas": [],
"sequences": [],
"tables": [
{
"alter": {},
"checks": [],
"columns": [],
"index": [],
"like": {"schema": None, "table_name": "orders"},
"partitioned_by": [],
"primary_key": [],
"schema": None,
"table_name": "orders_clone",
"tablespace": None,
}
],
"types": [],
}
ddl = """
create table orders_clone clone orders;
"""
result = DDLParser(ddl).run(group_by_type=True)
assert expected == result
def test_clone_schema():
expected = {
"domains": [],
"schemas": [
{"clone": {"from": "testschema"}, "schema_name": "mytestschema_clone"}
],
"sequences": [],
"tables": [],
"types": [],
}
ddl = """
create schema mytestschema_clone clone testschema;
"""
result = DDLParser(ddl).run(group_by_type=True)
assert expected == result
def test_cluster_by():
ddl = """
create table mytable (date timestamp_ntz, id number, content variant) cluster by (date, id);
"""
result = DDLParser(ddl).run(group_by_type=True)
expected = {
"domains": [],
"schemas": [],
"sequences": [],
"tables": [
{
"alter": {},
"checks": [],
"cluster_by": ["date", "id"],
"columns": [
{
"check": None,
"default": None,
"name": "date",
"nullable": True,
"references": None,
"size": None,
"type": "timestamp_ntz",
"unique": False,
},
{
"check": None,
"default": None,
"name": "id",
"nullable": True,
"references": None,
"size": None,
"type": "number",
"unique": False,
},
{
"check": None,
"default": None,
"name": "content",
"nullable": True,
"references": None,
"size": None,
"type": "variant",
"unique": False,
},
],
"index": [],
"partitioned_by": [],
"primary_key": [],
"schema": None,
"table_name": "mytable",
"tablespace": None,
}
],
"types": [],
}
assert expected == result
def test_enforced():
ddl = """
create table table2 (
col1 integer not null,
col2 integer not null,
constraint pkey_1 primary key (col1, col2) not enforced
);
"""
result = DDLParser(ddl).run(group_by_type=True)
expected = {
"domains": [],
"schemas": [],
"sequences": [],
"tables": [
{
"alter": {},
"checks": [],
"columns": [
{
"check": None,
"default": None,
"name": "col1",
"nullable": False,
"references": None,
"size": None,
"type": "integer",
"unique": False,
},
{
"check": None,
"default": None,
"name": "col2",
"nullable": False,
"references": None,
"size": None,
"type": "integer",
"unique": False,
},
],
"index": [],
"partitioned_by": [],
"primary_key": [],
"primary_key_enforced": False,
"schema": None,
"table_name": "table2",
"tablespace": None,
}
],
"types": [],
}
assert expected == result
| [
"xnuinside@gmail.com"
] | xnuinside@gmail.com |
85d28b2ed8514cc96ce157349b50199f1c467e84 | 39f37b192565bf0a30252099a0310d0394b5cb2c | /deepsearch/cps/apis/public/models/annotated_text.py | 2e9d8b1de43c2ba2f27f3f35027cb62caa3a2b3a | [
"MIT"
] | permissive | DS4SD/deepsearch-toolkit | 5a6608b744bcf2d7da5106ece735691ee10f79fb | 20d8198db6d5a75cfe374060910a0375312dadef | refs/heads/main | 2023-08-04T14:17:21.345813 | 2023-07-28T11:51:18 | 2023-07-28T11:51:18 | 498,645,500 | 88 | 13 | MIT | 2023-09-07T15:17:29 | 2022-06-01T08:05:41 | Python | UTF-8 | Python | false | false | 6,220 | py | # coding: utf-8
"""
Corpus Processing Service (CPS) API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from deepsearch.cps.apis.public.configuration import Configuration
class AnnotatedText(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'entities': 'dict(str, list[EntityAnnotation])',
'properties': 'object',
'relationships': 'dict(str, list[object])',
'text': 'str'
}
attribute_map = {
'entities': 'entities',
'properties': 'properties',
'relationships': 'relationships',
'text': 'text'
}
def __init__(self, entities=None, properties=None, relationships=None, text=None, local_vars_configuration=None): # noqa: E501
"""AnnotatedText - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._entities = None
self._properties = None
self._relationships = None
self._text = None
self.discriminator = None
self.entities = entities
self.properties = properties
self.relationships = relationships
self.text = text
@property
def entities(self):
"""Gets the entities of this AnnotatedText. # noqa: E501
:return: The entities of this AnnotatedText. # noqa: E501
:rtype: dict(str, list[EntityAnnotation])
"""
return self._entities
@entities.setter
def entities(self, entities):
"""Sets the entities of this AnnotatedText.
:param entities: The entities of this AnnotatedText. # noqa: E501
:type: dict(str, list[EntityAnnotation])
"""
if self.local_vars_configuration.client_side_validation and entities is None: # noqa: E501
raise ValueError("Invalid value for `entities`, must not be `None`") # noqa: E501
self._entities = entities
@property
def properties(self):
"""Gets the properties of this AnnotatedText. # noqa: E501
:return: The properties of this AnnotatedText. # noqa: E501
:rtype: object
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this AnnotatedText.
:param properties: The properties of this AnnotatedText. # noqa: E501
:type: object
"""
if self.local_vars_configuration.client_side_validation and properties is None: # noqa: E501
raise ValueError("Invalid value for `properties`, must not be `None`") # noqa: E501
self._properties = properties
@property
def relationships(self):
"""Gets the relationships of this AnnotatedText. # noqa: E501
:return: The relationships of this AnnotatedText. # noqa: E501
:rtype: dict(str, list[object])
"""
return self._relationships
@relationships.setter
def relationships(self, relationships):
"""Sets the relationships of this AnnotatedText.
:param relationships: The relationships of this AnnotatedText. # noqa: E501
:type: dict(str, list[object])
"""
if self.local_vars_configuration.client_side_validation and relationships is None: # noqa: E501
raise ValueError("Invalid value for `relationships`, must not be `None`") # noqa: E501
self._relationships = relationships
@property
def text(self):
"""Gets the text of this AnnotatedText. # noqa: E501
:return: The text of this AnnotatedText. # noqa: E501
:rtype: str
"""
return self._text
@text.setter
def text(self, text):
"""Sets the text of this AnnotatedText.
:param text: The text of this AnnotatedText. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and text is None: # noqa: E501
raise ValueError("Invalid value for `text`, must not be `None`") # noqa: E501
self._text = text
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AnnotatedText):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AnnotatedText):
return True
return self.to_dict() != other.to_dict()
| [
"dol@zurich.ibm.com"
] | dol@zurich.ibm.com |
b6136bca21599639df960e68a3941f35468a6848 | 67f36de3eec7f64b4b1070727ab7bc34e38d3724 | /apps/users/migrations/0002_usersdesc.py | 18e5cba06a0ea729d80fe2f82125dfdd954abab7 | [] | no_license | shd0812/django_shd_restframework | a46371756accffddfe3e915dc22f96f0bc2f0740 | d6ab9c2896b0e3766e17983019d2ac09a8c81b99 | refs/heads/main | 2023-01-19T11:17:19.097711 | 2020-11-22T11:25:05 | 2020-11-22T11:25:05 | 315,007,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | # Generated by Django 3.1.2 on 2020-11-15 04:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UsersDesc',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('favorite', models.CharField(max_length=100)),
('address', models.CharField(max_length=100)),
('phone', models.CharField(max_length=11, unique=True)),
('users', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='users.userinfo')),
],
options={
'db_table': 'tb_users_desc',
},
),
]
| [
"759275499@qq.com"
] | 759275499@qq.com |
bf4b2947db1bed8d4e6c36d47effa7e96ffa5242 | ded13e921c8365c6113911a5834969ec3d33f989 | /063/Unique Paths II.py | a082d0d5a27c38b339744a82a15a7aaff03aecc0 | [] | no_license | ArrayZoneYour/LeetCode | b7b785ef0907640623e5ab8eec1b8b0a9d0024d8 | d09f56d4fef859ca4749dc753d869828f5de901f | refs/heads/master | 2021-04-26T23:03:10.026205 | 2018-05-09T15:49:08 | 2018-05-09T15:49:08 | 123,922,098 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | # /usr/bin/python
# coding: utf-8
class Solution:
def uniquePathsWithObstacles(self, obstacleGrid):
"""
:type obstacleGrid: List[List[int]]
:rtype: int
"""
height = len(obstacleGrid)
width = len(obstacleGrid[0])
step_num = [[0 for col in range(width)] for row in range(height)]
if not obstacleGrid[0][0]:
step_num[0][0] = 1
for row in range(1, height):
if not obstacleGrid[row][0]:
step_num[row][0] = step_num[row-1][0]
for col in range(1, width):
if not obstacleGrid[0][col]:
step_num[0][col] = step_num[0][col-1]
for row in range(1, height):
for col in range(1, width):
if not obstacleGrid[row][col]:
step_num[row][col] = step_num[row-1][col] + step_num[row][col-1]
return step_num[-1][-1]
print(Solution().uniquePathsWithObstacles([
[0,0,0],
[0,1,0],
[0,0,0]
])) | [
"hustliyidong@gmail.com"
] | hustliyidong@gmail.com |
5b2b9ffdd1e257920851c47cf8527bcd03b9c247 | 0b134572e3ac3903ebb44df6d4138cbab9d3327c | /app/grandchallenge/workstation_configs/migrations/0008_auto_20210920_1439.py | a5c71b249716b80883be22b6699341c64c3c59fb | [
"Apache-2.0"
] | permissive | comic/grand-challenge.org | 660de3bafaf8f4560317f1dfd9ae9585ec272896 | dac25f93b395974b32ba2a8a5f9e19b84b49e09d | refs/heads/main | 2023-09-01T15:57:14.790244 | 2023-08-31T14:23:04 | 2023-08-31T14:23:04 | 4,557,968 | 135 | 53 | Apache-2.0 | 2023-09-14T13:41:03 | 2012-06-05T09:26:39 | Python | UTF-8 | Python | false | false | 1,446 | py | # Generated by Django 3.1.13 on 2021-09-20 14:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
(
"workstation_configs",
"0007_workstationconfig_auto_jump_center_of_gravity",
)
]
operations = [
migrations.RemoveField(
model_name="workstationconfig", name="client_rendered_sidebar"
),
migrations.AddField(
model_name="workstationconfig",
name="show_algorithm_output_plugin",
field=models.BooleanField(
default=True,
help_text="Show algorithm outputs with navigation controls",
),
),
migrations.AddField(
model_name="workstationconfig",
name="show_image_switcher_plugin",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="workstationconfig",
name="show_lut_selection_tool",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="workstationconfig",
name="show_overlay_plugin",
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name="workstationconfig",
name="show_overlay_selection_tool",
field=models.BooleanField(default=True),
),
]
| [
"noreply@github.com"
] | comic.noreply@github.com |
d12a3c1a0a1d4a430b8fb15efcaa15d9f558aec6 | 892ab4d7836e369d0e657be91b4dcd3e8153f372 | /compute/wps/tests/test_tasks_edas.py | 34133d825a836e498dd903b6530238665d2e93a4 | [] | no_license | davidcaron/esgf-compute-wps | 9e28e3af1ec1f524abdcce39cfe42c7bbcb3d50b | c3ffca449f65f8d206032d040ead6e14085f04ab | refs/heads/master | 2020-04-16T20:54:59.095653 | 2018-05-04T22:00:45 | 2018-05-04T22:00:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,793 | py | import os
import shutil
import cwt
import mock
from django import test
from . import helpers
from wps import models
from wps import settings
from wps import WPSError
from wps.tasks import cdat
from wps.tasks import edas
class EDASTaskTestCase(test.TestCase):
fixtures = ['processes.json', 'servers.json', 'users.json']
def setUp(self):
self.user = models.User.objects.all()[0]
self.server = models.Server.objects.all()[0]
self.process = models.Process.objects.all()[0]
def test_check_exceptions_error(self):
with self.assertRaises(WPSError) as e:
edas.check_exceptions('!<response><exceptions><exception>error</exception></exceptions></response>')
def test_check_exceptions(self):
edas.check_exceptions('!<response></response>')
def test_listen_edas_output_timeout(self):
mock_self = mock.MagicMock()
mock_poller = mock.MagicMock(**{'poll.return_value': {}})
mock_job = mock.MagicMock()
with self.assertRaises(WPSError) as e:
edas.listen_edas_output(mock_self, mock_poller, mock_job)
def test_listen_edas_output_heartbeat(self):
mock_self = mock.MagicMock()
mock_poller = mock.MagicMock(**{'poll.return_value': [(mock.MagicMock(**{'recv.side_effect': ['response', 'file']}), 0)]})
mock_job = mock.MagicMock()
result = edas.listen_edas_output(mock_self, mock_poller, mock_job)
self.assertIsInstance(result, str)
self.assertEqual(result, 'file')
def test_listen_edas_output(self):
mock_self = mock.MagicMock()
mock_poller = mock.MagicMock(**{'poll.return_value': [(mock.MagicMock(**{'recv.return_value': 'file'}), 0)]})
mock_job = mock.MagicMock()
result = edas.listen_edas_output(mock_self, mock_poller, mock_job)
self.assertIsInstance(result, str)
self.assertEqual(result, 'file')
@mock.patch('wps.tasks.edas.cdms2.open')
@mock.patch('shutil.move')
@mock.patch('wps.tasks.edas.listen_edas_output')
@mock.patch('wps.tasks.edas.initialize_socket')
def test_edas_submit_listen_failed(self, mock_init_socket, mock_listen, mock_move, mock_open):
mock_open.return_value = mock.MagicMock()
mock_open.return_value.__enter__.return_value.variables.keys.return_value = ['tas']
mock_job = mock.MagicMock()
mock_listen.return_value = None
variables = {
'v0': {'id': 'tas|v0', 'uri': 'file:///test.nc'},
}
operation = {
'name': 'EDAS.sum',
'input': ['v0'],
'axes': 'time'
}
with self.assertRaises(WPSError) as e:
edas.edas_submit({}, variables, {}, operation, user_id=self.user.id, job_id=0)
@mock.patch('wps.tasks.edas.process.Process')
@mock.patch('wps.tasks.edas.cdms2.open')
@mock.patch('shutil.move')
@mock.patch('wps.tasks.edas.listen_edas_output')
@mock.patch('wps.tasks.edas.initialize_socket')
def test_edas_submit(self, mock_init_socket, mock_listen, mock_move, mock_open, mock_process):
mock_open.return_value = mock.MagicMock()
mock_open.return_value.__enter__.return_value.variables.keys.return_value = ['tas']
mock_job = mock.MagicMock()
variables = {
'v0': {'id': 'tas|v0', 'uri': 'file:///test.nc'},
}
operation = {
'name': 'EDAS.sum',
'input': ['v0'],
'axes': 'time'
}
result = edas.edas_submit({}, variables, {}, operation, user_id=self.user.id, job_id=0)
self.assertIsInstance(result, dict)
calls = mock_init_socket.call_args_list
self.assertEqual(calls[0][0][3], 5670)
self.assertEqual(calls[1][0][3], 5671)
| [
"boutte.jason@gmail.com"
] | boutte.jason@gmail.com |
8fd790caf96a76d675989b45a6882cf5d8639859 | 824a58f05b24ef9ec0920aa046498c816f5c5121 | /models.py | 39fe32464103cff49911ac03919aff948c927db7 | [] | no_license | zhakguder/adversarial-generation | c383e4338b53e3763ccbf572644ff8261d717ea6 | 10d2eb64f1b6a9117d86f3333a236c25b399dd3a | refs/heads/master | 2020-05-17T16:40:50.319921 | 2019-05-21T14:25:50 | 2019-05-21T14:25:50 | 183,825,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,985 | py | from functools import partial
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Flatten
from utils import _softplus_inverse
import tensorflow_probability as tfp
from custom_layers import LSHLayer, clusterLayer
from settings import get_settings
from functools import reduce
tfd = tfp.distributions
tfpl = tfp.layers
from ipdb import set_trace
flags, params = get_settings()
forward_calls = ''
layer_count = 0
def build_net(hidden_dims, trainable=True):
dense_relu = partial(Dense, activation='tanh')
net = Sequential()
if forward_calls in ['encoder', 'mnist']:
prev_dim = (28, 28)
elif forward_calls == 'decoder':
prev_dim = params['latent_dim']
if forward_calls in ['mnist', 'encoder']:
net.add(Flatten(input_shape=prev_dim))
prev_dim = reduce(lambda x,y: x*y, prev_dim)
for idx, dim in enumerate(hidden_dims):
net.add(dense_relu(dim, name="{}_relu_{}".format(forward_calls, idx), input_shape = [prev_dim], trainable=trainable)) #
#print('Dim: {}'.format(prev_dim))
prev_dim = dim
return net
def make_encoder(hidden_dims, latent_dim, out_activation, network=None):
global forward_calls
forward_calls = 'encoder'
if network is not None:
encoder_net = network
else:
encoder_net = build_net(hidden_dims)
encoder_net.add(Dense(latent_dim * 2, activation = out_activation, name = '{}_{}'.format(forward_calls, out_activation)))
def encoder(inputs):
outputs = encoder_net(inputs)
return outputs
return encoder, encoder_net
def make_decoder(hidden_dims, output_dim, network=None):
global forward_calls
out_activation = 'linear'
forward_calls = 'decoder'
if network is not None:
decoder_net = network
else:
decoder_net = build_net(hidden_dims)
decoder_net.add(Dense(output_dim, activation = out_activation, name = '{}_{}'.format(forward_calls, out_activation)))
def decoder(sample):
reconstruction = decoder_net(sample)
return reconstruction
return decoder, decoder_net
def make_lsh(dim, w):
net = Sequential()
net.add(LSHLayer(dim, w))
def lsh(reconstructions):
hash_codes = net(reconstructions)
return hash_codes
return lsh, net
def make_cluster():
net = Sequential()
net.add(clusterLayer())
def cluster(inputs):
q_s = net(inputs)
return q_s
return cluster
def make_mnist(network_dims):
global forward_calls
forward_calls = 'mnist'
net = build_net(network_dims, trainable=True)
net.add(Dense(10, activation='linear', trainable=True))
return net
def initialize_eval_mnist(net):
data = tf.random.normal((1, 784))
net(data)
return net
def set_mnist_weights(net, weights):
used = 0
for i, layer in enumerate(net.layers):
if i > 0:
weight_shape = layer.weights[0].shape
bias_shape = layer.weights[1].shape
n_weight = tf.reduce_prod(weight_shape).numpy()
n_bias = tf.reduce_prod(bias_shape).numpy()
tmp_used = used + n_weight
layer_weights = tf.reshape(weights[used:tmp_used], weight_shape)
used = tmp_used
tmp_used += n_bias
layer_biases = weights[used:tmp_used]
used = tmp_used
net.layers[i].set_weights([layer_weights, layer_biases])
return net
def mnist_classifier_net(input_shape, output_shape, training):
net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=input_shape),
tf.keras.layers.Dense(128, activation='relu', trainable=training),
tf.keras.layers.Dense(256, activation='relu', trainable=training),
tf.keras.layers.Dense(output_shape, trainable=training)
])
return net
def cifar10_classifier_net(filters_array, dropout_array, input_shape, output_shape, training):
from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Dropout
net = tf.keras.models.Sequential()
layer_count = 0
for filters, dropout in zip (filters_array, dropout_array):
for i in range(2):
if layer_count == 0:
net.add(
Conv2D(filters, (3,3), padding='same', activation='relu', trainable=training, input_shape = input_shape))
net.add(BatchNormalization())
layer_count +=1
else:
net.add(Conv2D(filters, (3,3), padding='same', activation='relu', trainable=training))
net.add(BatchNormalization())
layer_count += 1
net.add(MaxPooling2D(pool_size=(2,2)))
net.add(Dropout(dropout))
net.add(Flatten())
net.add(tf.keras.layers.Dense(output_shape, trainable=training))
return net
if __name__ == '__main__':
net = cifar10_classifier_net([32, 64, 128], [0.2, 0.3, 0.4], 10, True)
| [
"zeynep.hakguder@huskers.unl.edu"
] | zeynep.hakguder@huskers.unl.edu |
2528570aad7cb0dbdf5b4fe262b0745d0d60e920 | ecb4eb32a75e626ebee29142fa0b28e18362dd8c | /adam/domain/document_item.py | c3eb014f1ffdab1b17baccbf0ead952880b7b1d3 | [
"BSD-3-Clause"
] | permissive | souzaux/adam | a2e29d210ccfbf8eb02a5f1a1dc5be297515fdc3 | cbb261b909adde39b874d355c47fe3824cd3e9e1 | refs/heads/master | 2020-05-30T13:25:55.290349 | 2016-08-04T09:22:12 | 2016-08-04T09:22:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | # -*- coding: utf-8 -*-
"""
adam.domain.document_item.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'document_item' schema settings.
:copyright: (c) 2016 by Nicola Iarocci and CIR2000.
:license: BSD, see LICENSE for more details.
"""
from common import variation
from vat import vat_field
from warehouses import warehouse_field
# Cerberus-style schema for the free-form product details carried by a
# document line item (identification, lot and size information).
detail = {
    'type': 'dict',
    'schema': {
        'sku': {'type': 'string'},
        'description': {'type': 'string'},
        'color': {'type': 'string'},
        'unit_of_measure': {'type': 'string'},
        'notes': {'type': 'string'},
        'serial_number': {'type': 'string'},
        # Production lot: identifier plus production/expiration timestamps.
        'lot': {
            'type': 'dict',
            'schema': {
                'number': {'type': 'string'},
                'date': {'type': 'datetime'},
                'expiration': {'type': 'datetime'},
            }
        },
        'size': {
            'type': 'dict',
            'schema': {
                'number': {'type': 'string'},
                'name': {'type': 'string'},
            }
        },
    }
}
# Schema for one document line item. Monetary fields are integers
# (presumably the smallest currency unit, e.g. cents — verify against the
# code that writes them); commissions are expressed as floats.
item = {
    'type': 'dict',
    'schema': {
        'guid': {'type': 'string'},
        'quantity': {'type': 'float'},
        'processed_quantity': {'type': 'float'},
        'price': {'type': 'integer'},
        'net_price': {'type': 'integer'},
        'price_vat_inclusive': {'type': 'integer'},
        'total': {'type': 'integer'},
        'withholding_tax': {'type': 'boolean'},
        'commission': {'type': 'float'},
        'area_manager_commission': {'type': 'float'},
        'detail': detail,
        'vat': vat_field,
        'price_list': {'type': 'string'},
        'warehouse': warehouse_field,
        'variation_collection': variation,
    }
}
| [
"nicola@nicolaiarocci.com"
] | nicola@nicolaiarocci.com |
b9ada5a859b6f7ba9697a4b51f1a7f8c32c8c74f | e976eb4db57ddee4947cbab8746446dd53f6cf6f | /1-50/与所有单词相关联的字串.py | 745ac8d7d7a3e650d68f9b5975bee5c1a37da875 | [] | no_license | Aiyane/aiyane-LeetCode | 5328529079bcfbc84f4e4d67e3d8736b9745dc0d | 3c4d5aacc33f3ed66b6294894a767862170fb4f6 | refs/heads/master | 2020-04-01T20:33:54.125654 | 2019-06-25T09:56:10 | 2019-06-25T09:56:10 | 153,610,015 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,974 | py | #!/usr/bin/env/python3
# -*- coding: utf-8 -*-
# 与所有单词相关联的字串.py
"""
给定一个字符串 s 和一些长度相同的单词 words。在 s 中找出可以恰好串联 words 中所有单词的子串的起始位置。
注意子串要与 words 中的单词完全匹配,中间不能有其他字符,但不需要考虑 words 中单词串联的顺序。
示例 1:
输入:
s = "barfooothefooobarman",
words = ["fooo","bar"]
输出: [0,9]
解释: 从索引 0 和 9 开始的子串分别是 "barfoor" 和 "foobar" 。
输出的顺序不重要, [9,0] 也是有效答案。
示例 2:
输入:
s = "wordgoodstudentgoodword",
words = ["word","student"]
输出: []
"""
"""
思路:构造单词字典,因为长度是一样的,所以大循环里只需要循环min(width, length_s - length_words + 1)
如果获取的单词比结果还多,从第一个单词开始去掉,直到符合结果
"""
import profile
class Solution:
    def findSubstring(self, s, words):
        """Return the start indices of substrings of ``s`` that are exact
        concatenations of all ``words`` (all words have equal length, any
        order, no gaps).

        Sliding-window approach: for each alignment offset modulo the word
        length, grow a window word by word and shrink it from the left
        whenever a word appears more often than allowed.

        :type s: str
        :type words: List[str]
        :rtype: List[int]
        """
        if not s or not words:
            return []
        word_len = len(words[0])
        total_len = len(words) * word_len
        s_len = len(s)
        if s_len < total_len:
            return []
        # Required multiplicity of each word.
        required = {}
        for w in words:
            required[w] = required.get(w, 0) + 1
        starts = []
        # Only word_len distinct alignments exist (capped by feasible starts).
        for offset in range(min(word_len, s_len - total_len + 1)):
            left = right = offset
            window = {}
            while left + word_len <= s_len:
                chunk = s[right:right + word_len]
                right += word_len
                if chunk not in required:
                    # Unknown word: the window can never span it — restart after it.
                    left = right
                    window.clear()
                    if s_len - left < total_len:
                        break  # not enough characters remain for a full match
                else:
                    window[chunk] = window.get(chunk, 0) + 1
                    # Too many copies of this word: drop words from the left
                    # until the count is legal again.
                    while window[chunk] > required[chunk]:
                        window[s[left:left + word_len]] -= 1
                        left += word_len
                    if right - left == total_len:
                        starts.append(left)
        return starts
def main():
    # Exercise findSubstring on a tricky repeated-word case; prints [].
    text = "wordgoodgoodgoodbestword"
    word_list = ["word", "good", "best", "word"]
    solver = Solution()
    print(solver.findSubstring(text, word_list))
if __name__ == '__main__':
    # Run under the stdlib profiler to report where the search spends time.
    profile.run('main()')
| [
"2310091880qq@gmail.com"
] | 2310091880qq@gmail.com |
3569013ca6cef03a594258fb5f91a3563d856154 | 801f367bd19b8f2ab08669fd0a85aad7ace961ac | /cleaned_version/_train_utils/utils.py | b830833f01ab67eaec0665e6da7d821dc5b770b1 | [
"MIT"
] | permissive | Wendong-Huo/thesis-bodies | d91b694a6b1b6a911476573ed1ed27eb27fb000d | dceb8a36efd2cefc611f6749a52b56b9d3572f7a | refs/heads/main | 2023-04-17T18:32:38.541537 | 2021-03-12T19:53:23 | 2021-03-12T19:53:23 | 623,471,326 | 1 | 0 | null | 2023-04-04T12:45:48 | 2023-04-04T12:45:47 | null | UTF-8 | Python | false | false | 6,053 | py | import glob
import os
import gym
from stable_baselines3 import A2C, DDPG, DQN, HER, PPO, SAC, TD3
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines3.common.utils import set_random_seed
from stable_baselines3.common.callbacks import BaseCallback, EvalCallback
from policies.ppo_with_body_info import PPO_with_body_info
from policies.ppo_without_body_info import PPO_without_body_info
# Registry mapping algorithm names to their implementation classes.
# Only the two PPO variants are enabled; the other stable-baselines3
# algorithms are kept commented out for reference.
ALGOS = {
    # "a2c": A2C,
    # "ddpg": DDPG,
    # "dqn": DQN,
    # "her": HER,
    # "sac": SAC,
    # "td3": TD3,
    "ppo": PPO_without_body_info,
    "ppo_w_body": PPO_with_body_info
}
def linear_schedule(initial_value):
    """
    Build a linearly decaying learning-rate schedule.

    :param initial_value: (float or str) value at the start of training;
        strings are coerced to float so config-file values work unchanged
    :return: (function) mapping remaining progress (1 -> 0) to the value
    """
    start = float(initial_value) if isinstance(initial_value, str) else initial_value

    def schedule(progress):
        """
        Progress decreases from 1 (beginning) to 0 (end of training).

        :param progress: (float)
        :return: (float)
        """
        return progress * start

    return schedule
def get_latest_run_id(log_path, env_id):
    """
    Return the highest run number already used for ``env_id`` under
    ``log_path``, by scanning directories named ``<env_id>_<number>``.
    Returns 0 when no such run exists yet.

    :param log_path: (str) path to log folder
    :param env_id: (str)
    :return: (int) latest run number
    """
    latest = 0
    for path in glob.glob(log_path + "/{}_[0-9]*".format(env_id)):
        file_name = path.split("/")[-1]
        prefix, _, suffix = file_name.rpartition("_")
        if prefix == env_id and suffix.isdigit():
            latest = max(latest, int(suffix))
    return latest
def create_env(n_envs, env_id, kwargs, seed=0, normalize=False, normalize_kwargs=None, eval_env=False, log_dir=None):
    """
    Create a (vectorized) environment and optionally wrap it in VecNormalize.

    :param n_envs: (int) number of parallel environments
    :param env_id: (str) gym environment id
    :param kwargs: env kwargs (a single dict when n_envs == 1, otherwise a
        sequence indexed by environment rank)
    :param eval_env: (bool) whether this env is used for evaluation
        (reward normalization is disabled for eval envs)
    :return: (Union[gym.Env, VecEnv])
    """
    if n_envs == 1:
        # Rank 127 keeps the single (eval) env distinct from any training env.
        factories = [make_env(env_id, 127, seed, log_dir=log_dir, env_kwargs=kwargs)]
    else:
        # DummyVecEnv is used on purpose: SubprocVecEnv rarely helps on these
        # envs and is memory hungry.
        factories = [
            make_env(env_id, rank, seed, log_dir=log_dir, env_kwargs=kwargs[rank])
            for rank in range(n_envs)
        ]
    env = DummyVecEnv(factories)
    if normalize:
        # Copy so the caller's default dict is never mutated.
        local_kwargs = normalize_kwargs.copy()
        if eval_env:
            # Never normalize rewards for an evaluation environment.
            if local_kwargs:
                local_kwargs["norm_reward"] = False
            else:
                local_kwargs = {"norm_reward": False}
        env = VecNormalize(env, **local_kwargs)
    return env
def make_env(env_id, rank=0, seed=0, log_dir=None, wrapper_class=None, env_kwargs=None):
    """
    Return a thunk that builds one monitored (and optionally wrapped) env.

    :param env_id: (str)
    :param rank: (int) index of this env; offsets the seed and names the log
    :param seed: (int)
    :param log_dir: (str) directory for Monitor logs (created if missing)
    :param wrapper_class: (Type[gym.Wrapper]) optional wrapper applied last
    :param env_kwargs: (Dict[str, Any]) optional kwargs for the env constructor
    """
    if log_dir is not None:
        os.makedirs(log_dir, exist_ok=True)
    ctor_kwargs = {} if env_kwargs is None else env_kwargs
    env_seed = seed * 128 + rank  # unique per-rank seed

    def _thunk():
        set_random_seed(env_seed)
        env = gym.make(env_id, **ctor_kwargs)
        # Monitor first (e.g. for Atari envs where reward clipping is used).
        if log_dir is not None:
            log_file = os.path.join(log_dir, str(rank))
        else:
            log_file = None
        # Track success rate too for the real robot ("Neck" envs).
        if "Neck" in env_id:
            keywords = ("is_success",)
        else:
            keywords = ()
        env = Monitor(env, log_file, info_keywords=keywords)
        # Dict observation spaces are currently not supported
        # (https://github.com/hill-a/stable-baselines/issues/321), so a
        # gym.Wrapper subclass may be applied here instead.
        if wrapper_class:
            env = wrapper_class(env)
        env.seed(env_seed)
        return env

    return _thunk
class SaveVecNormalizeCallback(BaseCallback):
    """
    Callback saving the VecNormalize wrapper every ``save_freq`` steps.

    :param save_freq: (int)
    :param save_path: (str) folder receiving the pickled ``VecNormalize``
    :param name_prefix: (str) optional prefix; when None a single
        ``vecnormalize.pkl`` is overwritten, otherwise one timestamped file
        is written per save
    """

    def __init__(self, save_freq: int, save_path: str, name_prefix=None, verbose=0):
        super().__init__(verbose)
        self.save_freq = save_freq
        self.save_path = save_path
        self.name_prefix = name_prefix

    def _init_callback(self) -> None:
        # Make sure the destination folder exists before the first save.
        if self.save_path is not None:
            os.makedirs(self.save_path, exist_ok=True)

    def _on_step(self) -> bool:
        if self.n_calls % self.save_freq != 0:
            return True
        if self.name_prefix is None:
            path = os.path.join(self.save_path, "vecnormalize.pkl")
        else:
            path = os.path.join(self.save_path, f"{self.name_prefix}_{self.num_timesteps}_steps.pkl")
        vec_normalize = self.model.get_vec_normalize_env()
        if vec_normalize is not None:
            vec_normalize.save(path)
        if self.verbose > 1:
            print(f"Saving VecNormalize to {path}")
        return True
| [
"sliu1@uvm.edu"
] | sliu1@uvm.edu |
065d7e8d210b3c6e4e6c55a6995e288bbd83b8c6 | fe0017ae33385d7a2857d0aa39fa8861b40c8a88 | /env/lib/python3.8/site-packages/sklearn/mixture/base.py | 1daf2061f2fd01b8a342289d9497009a817083cd | [] | no_license | enriquemoncerrat/frasesback | eec60cc7f078f9d24d155713ca8aa86f401c61bf | e2c77f839c77f54e08a2f0930880cf423e66165b | refs/heads/main | 2023-01-03T23:21:05.968846 | 2020-10-18T21:20:27 | 2020-10-18T21:20:27 | 305,198,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py |
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys
# mypy error: Module X has no attribute y (typically for C extensions)
from . import _base # type: ignore
from ..externals._pep562 import Pep562
from ..utils.deprecation import _raise_dep_warning_if_not_pytest
# Old (deprecated) and new public import paths for this module.
deprecated_path = 'sklearn.mixture.base'
correct_import_path = 'sklearn.mixture'
# Emit a FutureWarning when imported directly (suppressed under pytest).
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)
def __getattr__(name):
    # PEP 562 module-level __getattr__: delegate every attribute lookup to
    # the private implementation module so old imports keep working.
    return getattr(_base, name)
if not sys.version_info >= (3, 7):
    # PEP 562 is only native from Python 3.7; emulate it on older versions.
    Pep562(__name__)
| [
"enriquemoncerrat@gmail.com"
] | enriquemoncerrat@gmail.com |
282b16e40ec888666211472ca10dba0e709687b1 | 81fe7f2faea91785ee13cb0297ef9228d832be93 | /HackerRank/Contests/WeekOfCode21/kangaroo.py | 0ba85ccd8448b9b12f2e0718b459aa938a331f45 | [] | no_license | blegloannec/CodeProblems | 92349c36e1a35cfc1c48206943d9c2686ea526f8 | 77fd0fa1f1a519d4d55265b9a7abf12f1bd7d19e | refs/heads/master | 2022-05-16T20:20:40.578760 | 2021-12-30T11:10:25 | 2022-04-22T08:11:07 | 54,330,243 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | #!/usr/bin/env python
import sys
def main():
x1,v1,x2,v2 = map(int,sys.stdin.readline().split())
if x1==x2:
print 'YES'
elif v1==v2:
print 'NO'
elif (x1-x2)*(v2-v1)>=0 and (x1-x2)%(v2-v1)==0:
print 'YES'
else:
print 'NO'
main()
| [
"blg@gmx.com"
] | blg@gmx.com |
8d1ae47c9f07f66017f56445147ce843cb789c27 | 504a5e7c9319bda04e3d33978f64404bba47392a | /Python 200/034.py | 4afcfdef5e71d2866eeb60876b0ee093ce0122a4 | [] | no_license | Ani-Gil/Python | 9fd02d321a7e21c07ea5fa30ae0f0336cae15861 | 5bb019bfbe19f10f20f6c5883299011717d20f55 | refs/heads/main | 2023-03-15T02:21:07.467329 | 2021-02-24T01:20:00 | 2021-02-24T01:20:00 | 324,080,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # 034.py - 문자열 포맷팅 이해하기
# Demonstrates %-style string formatting with strings and integers.
txt1 = '자바'; txt2 = '파이썬'
num1 = 5; num2 = 10
# %s substitutes strings, %d integers.
print('나는 %s보다 %s에 더 익숙합니다.' % (txt1, txt2))
print('%s은 %s보다 %d배 더 쉽습니다.' % (txt2, txt1, num1))
print('%d + %d + %d' % (num1, num2, num1 + num2))
# A literal percent sign is escaped as %% inside a format string.
print('작년 세계 경제 성장률은 전년에 비해 %d%% 포인트 증가했다.' % num1)
| [
"dlatldn426@gmail.com"
] | dlatldn426@gmail.com |
8ff9155fca196073894615de07c53e2ea89d6b7b | 364b36d699d0a6b5ddeb43ecc6f1123fde4eb051 | /_downloads_1ed/fig_lyndenbell_toy.py | 80a9743fd39ab85865499aac101b2c96c46b7eaf | [] | no_license | astroML/astroml.github.com | eae3bfd93ee2f8bc8b5129e98dadf815310ee0ca | 70f96d04dfabcd5528978b69c217d3a9a8bc370b | refs/heads/master | 2022-02-27T15:31:29.560052 | 2022-02-08T21:00:35 | 2022-02-08T21:00:35 | 5,871,703 | 2 | 5 | null | 2022-02-08T21:00:36 | 2012-09-19T12:55:23 | HTML | UTF-8 | Python | false | false | 4,082 | py | """
Luminosity function code on toy data
------------------------------------
Figure 4.9.
An example of using Lynden-Bell's C- method to estimate a bivariate
distribution from a truncated sample. The lines in the left panel show the true
one-dimensional distributions of x and y (truncated Gaussian distributions).
The two-dimensional distribution is assumed to be separable; see eq. 4.85.
A realization of the distribution is shown in the right panel, with a
truncation given by the solid line. The points in the left panel are computed
from the truncated data set using the C- method, with error bars from 20
bootstrap resamples.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
from astroML.lumfunc import bootstrap_Cminus
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Define and sample our distributions
N = 10000
np.random.seed(42)
# Define the input distributions for x and y.
# truncnorm(a, b, loc, scale) is a Gaussian truncated to
# [loc + a*scale, loc + b*scale].
x_pdf = stats.truncnorm(-2, 1, 0.66666, 0.33333)
y_pdf = stats.truncnorm(-1, 2, 0.33333, 0.33333)
x = x_pdf.rvs(N)
y = y_pdf.rvs(N)
# define the truncation: we'll design this to be symmetric
# so that xmax(y) = max_func(y)
# and ymax(x) = max_func(x)
max_func = lambda t: 1. / (0.5 + t) - 0.5
xmax = max_func(y)
xmax[xmax > 1] = 1  # cutoff at x=1
ymax = max_func(x)
ymax[ymax > 1] = 1  # cutoff at y=1
# truncate the data: keep only points inside the truncation boundary
flag = (x < xmax) & (y < ymax)
x = x[flag]
y = y[flag]
xmax = xmax[flag]
ymax = ymax[flag]
# bin edges for the recovered 1D distributions
x_fit = np.linspace(0, 1, 21)
y_fit = np.linspace(0, 1, 21)
#------------------------------------------------------------
# compute the Cminus distributions (with bootstrap)
# Returns the binned x/y distributions and their bootstrap error bars.
x_dist, dx_dist, y_dist, dy_dist = bootstrap_Cminus(x, y, xmax, ymax,
                                                    x_fit, y_fit,
                                                    Nbootstraps=20,
                                                    normalize=True)
# bin centers corresponding to the fit edges
x_mid = 0.5 * (x_fit[1:] + x_fit[:-1])
y_mid = 0.5 * (y_fit[1:] + y_fit[:-1])
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2))
fig.subplots_adjust(bottom=0.2, top=0.95,
                    left=0.1, right=0.92, wspace=0.25)
# First subplot is the true & inferred 1D distributions
ax = fig.add_subplot(121)
ax.plot(x_mid, x_pdf.pdf(x_mid), '-k', label='$p(x)$')
ax.plot(y_mid, y_pdf.pdf(y_mid), '--k', label='$p(y)$')
ax.legend(loc='lower center')
ax.errorbar(x_mid, x_dist, dx_dist, fmt='ok', ecolor='k', lw=1, ms=4)
ax.errorbar(y_mid, y_dist, dy_dist, fmt='^k', ecolor='k', lw=1, ms=4)
ax.set_ylim(0, 1.8)
ax.set_xlim(0, 1)
ax.set_xlabel('$x$, $y$')
ax.set_ylabel('normalized distribution')
# Second subplot is the "observed" 2D distribution
ax = fig.add_subplot(122)
H, xb, yb = np.histogram2d(x, y, bins=np.linspace(0, 1, 41))
plt.imshow(H.T, origin='lower', interpolation='nearest',
           extent=[0, 1, 0, 1], cmap=plt.cm.binary)
cb = plt.colorbar()
# Draw the truncation boundary, zeroing points outside the unit square.
x_limit = np.linspace(-0.1, 1.1, 1000)
y_limit = max_func(x_limit)
x_limit[y_limit > 1] = 0
y_limit[x_limit > 1] = 0
ax.plot(x_limit, y_limit, '-k')
ax.set_xlim(0, 1.1)
ax.set_ylim(0, 1.1)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
cb.set_label('counts per pixel')
ax.text(0.93, 0.93, '%i points' % len(x), ha='right', va='top',
        transform=ax.transAxes)
plt.show()
| [
"vanderplas@astro.washington.edu"
] | vanderplas@astro.washington.edu |
29c37a9dfe43e6415a3d457fbea720baf1e1e1d2 | dfcb9827b966a5055a47e27b884eaacd88269eb1 | /ssseg/cfgs/ce2p/cfgs_voc_resnet101os8.py | 385c6008fa55db823a508dafaaef19402fd531a9 | [
"MIT"
] | permissive | RiDang/sssegmentation | cdff2be603fc709c1d03897383032e69f850f0cd | 2a79959a3d7dff346bab9d8e917889aa5621615a | refs/heads/main | 2023-02-05T12:52:35.391061 | 2020-12-27T05:59:58 | 2020-12-27T05:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | '''define the config file for voc and resnet101os8'''
from .base_cfg import *
# modify dataset config: point both splits at PASCAL VOC 2012
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG['train'].update(
    {
        'type': 'voc',
        'set': 'trainaug',
        'rootdir': 'data/VOCdevkit/VOC2012',
    }
)
DATASET_CFG['test'].update(
    {
        'type': 'voc',
        'rootdir': 'data/VOCdevkit/VOC2012',
    }
)
# modify dataloader config (defaults from base_cfg are kept unchanged)
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
    {
        'max_epochs': 60,
    }
)
# modify losses config (defaults from base_cfg are kept unchanged)
LOSSES_CFG = LOSSES_CFG.copy()
# modify model config
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
    {
        # 21 = 20 VOC object classes + background
        'num_classes': 21,
        'backbone': {
            'type': 'resnet101',
            'series': 'resnet',
            'pretrained': True,
            'outstride': 8,
            'use_stem': True
        }
    }
)
# modify common config: output/backup locations for train and test runs
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
    {
        'backupdir': 'ce2p_resnet101os8_voc_train',
        'logfilepath': 'ce2p_resnet101os8_voc_train/train.log',
    }
)
COMMON_CFG['test'].update(
    {
        'backupdir': 'ce2p_resnet101os8_voc_test',
        'logfilepath': 'ce2p_resnet101os8_voc_test/test.log',
        'resultsavepath': 'ce2p_resnet101os8_voc_test/ce2p_resnet101os8_voc_results.pkl'
    }
)
"1159254961@qq.com"
] | 1159254961@qq.com |
b7eb6dfe9463dbbb7e94e66dc97dd8fd8e80d49b | 0809673304fe85a163898983c2cb4a0238b2456e | /src/lesson_algorithms/contextlib_exitstack_callbacks_error.py | ce142be2225b3d9e9678f796d9a6bd379eda226d | [
"Apache-2.0"
] | permissive | jasonwee/asus-rt-n14uhp-mrtg | 244092292c94ff3382f88f6a385dae2aa6e4b1e1 | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | refs/heads/master | 2022-12-13T18:49:02.908213 | 2018-10-05T02:16:41 | 2018-10-05T02:16:41 | 25,589,776 | 3 | 1 | Apache-2.0 | 2022-11-27T04:03:06 | 2014-10-22T15:42:28 | Python | UTF-8 | Python | false | false | 356 | py | import contextlib
def callback(*args, **kwds):
    """Report the positional and keyword arguments this callback was registered with."""
    print(f'closing callback({args}, {kwds})')
# Demonstration: callbacks registered on an ExitStack run in LIFO order
# during unwinding, even when the stack is torn down by an exception.
try:
    with contextlib.ExitStack() as stack:
        stack.callback(callback, 'arg1', 'arg2')
        stack.callback(callback, arg3='val3')
        raise RuntimeError('thrown error')
except RuntimeError as err:
    print('ERROR: {}'.format(err))
| [
"peichieh@gmail.com"
] | peichieh@gmail.com |
3549ee4210441cea0700fff60b1ab8ff44b03cf8 | 86cc17a69213569af670faed7ad531cb599b960d | /prooo26.py | 19af9019514aa873268da93a2c03efbcc42129c0 | [] | no_license | LakshmikanthRavi/guvi-lux | ed1c389e27a9ec62e0fd75c140322563f68d311a | 5c29f73903aa9adb6484c76103edf18ac165259e | refs/heads/master | 2020-04-15T05:07:19.743874 | 2019-08-13T08:53:00 | 2019-08-13T08:53:00 | 164,409,489 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | z1=int(input())
l=list(map(int,input().split()))  # the sequence to scan
p=[]  # every contiguous slice of l (duplicates included)
t=[]  # non-empty slices
u=[]  # slices already in non-decreasing order
o=[]  # lengths of those sorted slices
# Enumerate all slices; l[i:j] is empty whenever j <= i.
# NOTE(review): this is O(n^3) time/space — fine for small inputs only.
for i in range(0,len(l)+1):
    for j in range(0,len(l)+1):
        p.append(l[i:j])
# Drop the empty slices.
for i in p:
    if i!=[]:
        t.append(i)
# Keep slices equal to their sorted copy, i.e. non-decreasing runs.
for i in t:
    if sorted(i)==i:
        u.append(i)
for i in u:
    o.append(len(i))
# Length of the longest non-decreasing contiguous sublist.
print(max(o))
| [
"noreply@github.com"
] | LakshmikanthRavi.noreply@github.com |
02a0734050ac0604dbf68fd0310300efecd5d176 | 780ab93e6c6871673ae667c9a180892bd3073f56 | /app/__init__.py | 073ff3e32cc176e023d0c1fa0460e64bcd462e67 | [] | no_license | changbj00/ApiTestManage | d6a8d030289455555a0ed460ed880081da08b4bf | c2fd19c54100acf72ce61bf920dadeae5a34f747 | refs/heads/master | 2020-04-01T00:15:11.097011 | 2018-09-30T09:41:30 | 2018-09-30T09:41:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | # encoding: utf-8
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_moment import Moment
from flask_login import LoginManager
from config import config
from config import config_log
from config import ConfigTask
from .util import global_variable # 初始化文件地址
# Flask extensions are created unbound here and attached to the app inside
# create_app() (the application-factory pattern).
login_manager = LoginManager()
# NOTE(review): this sets session_protection to the *string* 'None', not the
# constant None — presumably intentional, but worth confirming.
login_manager.session_protection = 'None'
# login_manager.login_view = '.login'
db = SQLAlchemy()
moment = Moment()
scheduler = ConfigTask().scheduler  # background scheduler for periodic jobs
basedir = os.path.abspath(os.path.dirname(__file__))  # package root directory
def create_app(config_name):
    """Application factory: build a Flask app for the given config name,
    wiring up logging, SQLAlchemy, login, the scheduler and the API blueprint."""
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    app.logger.addHandler(config_log())  # initialize logging
    config[config_name].init_app(app)
    moment.init_app(app)
    # See https://blog.csdn.net/yannanxiu/article/details/53426359 about errors
    # when scheduled jobs access the database: the pitfall is the difference
    # between db = SQLAlchemy() and db = SQLAlchemy(app). Binding db.app below
    # appears to be the workaround so jobs can query outside a request context.
    db.init_app(app)
    db.app = app
    db.create_all()
    login_manager.init_app(app)
    scheduler.start()  # start the scheduled-task scheduler
    # Unused blueprints kept for reference:
    # from .main import main as main_blueprint
    # app.register_blueprint(main_blueprint)
    # from .pro import pro as pro_blueprint
    # app.register_blueprint(pro_blueprint, url_prefix='/pro')
    #
    # from .DataTool import DataTools as DataTool_blueprint
    # app.register_blueprint(DataTool_blueprint, url_prefix='/dataTool')
    #
    # from .TestCase import TestCases as TestCase_blueprint
    # app.register_blueprint(TestCase_blueprint, url_prefix='/TestCase')
    #
    # from .testpage import testpages as testpage_blueprint
    # app.register_blueprint(testpage_blueprint, url_prefix='/testpage')
    #
    # from .apiManage import apiManages as apiManages_blueprint
    # app.register_blueprint(apiManages_blueprint, url_prefix='/apiManage')
    from .api import api as api_blueprint
    app.register_blueprint(api_blueprint, url_prefix='/api')
    # from .api.model import api as api_blueprint
    # app.register_blueprint(api_blueprint, url_prefix='/api')
    return app
| [
"362508572@qq.com"
] | 362508572@qq.com |
e709360b59b2c74ebfaf4a1c0ea08606f4413773 | 590a68b6e68b41b6b9f8d8f5240df3181af0b07f | /RNN/test/dataloader_frames_p2.py | bf4661febeea33d94a2b6ba2ae72bb83bd3f9d6e | [] | no_license | ziqinXU/RNN-Video-Segmentation | e1a3098f5597960f57a1626c2cec08ad5f6635b0 | 0baa348fd08fa7f6813cd55e70004b96c559b46a | refs/heads/master | 2022-04-04T10:51:44.583855 | 2020-02-06T07:14:49 | 2020-02-06T07:14:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py | import os
import pickle as pk
import reader
import numpy as np
import sys
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from generate_frames_p2 import extract_frames
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
class DATA(Dataset):
    """Dataset of videos; each item is a tensor of sampled, normalised frames."""

    def __init__(self, opt):
        # Pre-extract the sampled frames for every video.
        self.frames_data = extract_frames(opt)
        # (H, W, C) uint8 [0, 255] -> (C, H, W) float [0, 1], then
        # channel-wise normalisation with ImageNet statistics.
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(MEAN, STD)
        ])

    def __getitem__(self, idx):
        frames = self.frames_data[idx]  # sampled frames for one video
        num_frames = len(frames)
        # Stack all frames into one (N, C, 240, 320) tensor.
        video = torch.zeros(num_frames, 3, 240, 320)
        for pos in range(num_frames):
            video[pos] = self.transform(frames[pos])
        return video

    def __len__(self):
        # One sample per video.
        return len(self.frames_data)
| [
"noreply@github.com"
] | ziqinXU.noreply@github.com |
78765e98d7aca64c1627f12e61159fa84b27b105 | 1287bbb696e240dd0b92d56d4fdf4246370f3e14 | /_requests.py | d8a233f7f9ee5c53302d0f30e1a99ef3ed08382e | [] | no_license | omerfarukcelenk/PythonCalismalari | ed0c204084860fddcb892e6edad84fdbc1ed38ec | 28da12d7d042ec306f064fb1cc3a1a026cb57b74 | refs/heads/main | 2023-04-13T18:23:15.270020 | 2021-04-26T21:06:21 | 2021-04-26T21:06:21 | 361,893,918 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | import requests
import json
# Fetch the demo todo list and decode the JSON body.
result = requests.get("https://jsonplaceholder.typicode.com/todos")
result = json.loads(result.text)  # NOTE: result.json() would do the same
print(result[0]["title"])
print(result[0])
# Print every todo's title.
for i in result:
    print(i["title"])
print(type(result))  # the decoded payload is a list of dicts
| [
"omerfar0133@gmail.com"
] | omerfar0133@gmail.com |
81fd332f532043398a89498afa06b89ca560e631 | 944a91cbdb75e53e3de22604b258d2404bc7415d | /jobs/urls.py | 5848eb07e3640a5b1574e0abbc5be4e7e4dcadbd | [] | no_license | lopezjronald/django-finance-project | 3662adbe406071b7a07ab36c9592ac194f5e91ea | a49667f2c4b4b0350d0322a4def49803c3b19c15 | refs/heads/master | 2022-12-25T00:11:22.905175 | 2020-09-29T15:45:20 | 2020-09-29T15:45:20 | 293,967,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | from django.urls import path
from .views import JobListView, JobDetailView
# Job list at the app root; job detail addressed by its UUID primary key.
urlpatterns = [
    path('', JobListView.as_view(), name='job_list'),
    path('<uuid:pk>/', JobDetailView.as_view(), name='job_detail'),
]
| [
"lopez.j.ronald@gmail.com"
] | lopez.j.ronald@gmail.com |
6522b74111249a279635c042c7be80a569507cc3 | 30a8b69bd2e0a3f3c2c1c88fb3bd8a28e6fc4cd0 | /Part1/auth_uri_foursquare.py | b637f4a2e5e5fcb20022842e6ea6f0ce4918dfb4 | [] | no_license | llord1/Mining-Georeferenced-Data | d49108f443922f02b90431ad7a9626ea17fd0554 | c71f2e151ccfc4a1a9c07b5fcf4e95b7f7ba70e9 | refs/heads/master | 2021-05-30T13:27:57.663015 | 2015-12-29T09:10:08 | 2015-12-29T09:10:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | #!/usr/bin/env python
import foursquare
# OAuth credentials per account; replace the placeholders with real values
# before running.
accounts = {"tutorial": {"client_id": "CLIENT_ID",
                         "client_secret": "CLIENT_SECRET",
                         "access_token": ""
                         }
            }
app = accounts["tutorial"]
client = foursquare.Foursquare(client_id=app["client_id"],
                               client_secret=app["client_secret"],
                               redirect_uri='http://www.bgoncalves.com/redirect')
# Print the URL the user must visit to authorize this app (Python 2 syntax).
auth_uri = client.oauth.auth_url()
print auth_uri
| [
"bgoncalves@gmail.com"
] | bgoncalves@gmail.com |
1dc1981e9a043f4565683d175e6c60902db0a53b | 899aeab7e2594964ee57f60b127db6913b755342 | /src/importlinter/__init__.py | 96d6ea30852d12e3ccae4a0c89ba08aec3e76e6d | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | skarzi/import-linter | c8b41a7828d3dc3970c63612fee0f5687d93be17 | 1ac62d58575235ada887e594819ed294c1b1d2b2 | refs/heads/master | 2023-02-06T13:39:10.883308 | 2020-12-01T08:44:59 | 2020-12-01T08:44:59 | 264,639,120 | 0 | 0 | BSD-2-Clause | 2020-05-17T10:35:50 | 2020-05-17T10:35:49 | null | UTF-8 | Python | false | false | 157 | py | __version__ = "1.2"
from .application import output # noqa
from .domain import fields # noqa
from .domain.contract import Contract, ContractCheck # noqa
| [
"david.seddon@octoenergy.com"
] | david.seddon@octoenergy.com |
a67a06cb6603c817b9768586b740c9ac6d754f98 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part010168.py | 32165658309da40110f837edcd7c20bbc61d6fcc | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher51127(CommutativeMatcher):
    # Auto-generated many-to-one matcher for commutative Mul patterns
    # (emitted by matchpy's code generator for the Rubi rules).
    _instance = None
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i3.2.1.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i3.2.1.2.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily create and reuse the singleton matcher instance.
        if CommutativeMatcher51127._instance is None:
            CommutativeMatcher51127._instance = CommutativeMatcher51127()
        return CommutativeMatcher51127._instance
    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 51126
        return
        # Unreachable `yield` below makes this function a generator that
        # yields nothing (generated-code idiom for an empty match iterator).
        yield
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
52877fb16c62338f0a45f2d98d40f9cbdd981dd0 | 6fd3fff474656ff98dffb597fef249f156624856 | /footlbotest/leaveMsgUI/51xingsheng/leaveMsgUI004.py | 5c70ce57150203f53045a361e443f424c6167c47 | [] | no_license | lobo1233456/footlbotestproj | c241d426a5f186616cbdf68f1e093e0a56c4db33 | f69ea745f1a91f3f365007506c5e6d12cdc7dd8b | refs/heads/master | 2020-07-05T14:42:42.067328 | 2019-09-09T06:18:43 | 2019-09-09T06:18:43 | 202,675,044 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,870 | py | #!user/bin/env python3
# -*- coding: UTF-8 -*-
import random
import time
from retrying import retry
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException, ElementClickInterceptedException
from selenium.webdriver.support.select import Select
from footlbolib.testcase import FootlboTestCase
class leaveMsgUI004(FootlboTestCase):
'''
非合作商页面右侧留言窗口
'''
owner = "liubo"
timeout = 5
priority = FootlboTestCase.EnumPriority.High
status = FootlboTestCase.EnumStatus.Design
tags = "BVT"
def pre_test(self):
self.accept_next_alert = True
def run_test(self):
self.driver = webdriver.Firefox()
driver = self.driver
# i = random.randint(7,9)
# print("----------%s-----------"%i)
driver.get("https://www.51xinsheng.com/ctbj/")
driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='首页'])[2]/following::li[1]").click()
driver.find_element_by_link_text(u"客厅").click()
driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='混搭'])[2]/following::img[1]").click()
driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='装修成这样花多少钱'])[1]/following::select[1]").click()
Select(driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='装修成这样花多少钱'])[1]/following::select[1]")).select_by_visible_text(
u"河北省")
driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='装修成这样花多少钱'])[1]/following::option[4]").click()
driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='装修成这样花多少钱'])[1]/following::select[2]").click()
Select(driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='装修成这样花多少钱'])[1]/following::select[2]")).select_by_visible_text(
u"邯郸市")
driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='装修成这样花多少钱'])[1]/following::option[40]").click()
driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='装修成这样花多少钱'])[1]/following::select[3]").click()
Select(driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='装修成这样花多少钱'])[1]/following::select[3]")).select_by_visible_text(
u"101-150㎡")
driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='装修成这样花多少钱'])[1]/following::option[52]").click()
driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='装修成这样花多少钱'])[1]/following::input[1]").clear()
driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='装修成这样花多少钱'])[1]/following::input[1]").send_keys(
"13764743157")
driver.find_element_by_xpath(
u"(.//*[normalize-space(text()) and normalize-space(.)='装修成这样花多少钱'])[1]/following::button[1]").click()
time.sleep(2)
msg = self.close_alert_and_get_its_text(driver)
self.log_info(msg)
self.assert_("检查成功提交的结果", u"报价有疑问?装修管家稍后致电为您解答" == msg)
def post_test(self):
    """Tear-down hook: quit the WebDriver session and log that the run is over."""
    self.driver.quit()
    self.log_info("testOver")
# Allow running this single test case directly as a script.
if __name__ == '__main__':
    leaveMsgUI004().debug_run()
| [
"1009548820@qq.com"
] | 1009548820@qq.com |
1199f68eef14572d6f677aad8d14f4498c886919 | 8d3835e39cbc2c74d8535b809686d6ab3033c0d0 | /ecommerce/orders/migrations/0009_auto_20190123_2034.py | 3931948db21a8cd84cd407429a0528cf9f12ada5 | [] | no_license | gayatribasude/GayatrisWorld | 125698955cd8b98a5aa2377331293587a57f2911 | 552ea2ef946e95f5bccc4e51d4030484ab0bc438 | refs/heads/master | 2023-06-25T19:45:03.232059 | 2021-08-02T16:43:47 | 2021-08-02T16:43:47 | 384,343,617 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | # Generated by Django 2.1.3 on 2019-01-23 15:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redeclares ``Order.status`` as a 20-char
    field with the choice set created/paid/shipped/refunded."""

    dependencies = [
        ('orders', '0008_auto_20190123_1027'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='status',
            field=models.CharField(choices=[('created', 'Created'), ('paid', 'Paid'), ('shipped', 'Shipped'), ('refunded', 'Refunded')], default='created', max_length=20),
        ),
    ]
| [
"gayatribasude"
] | gayatribasude |
03d5e8af92c8ad4c1a8a5acf1d00dfb40be96012 | 390f1fd25ebce9e18bce582b7c0b81ab989ff738 | /npmctree/sampling.py | 1d27118bced607cdbcbaaf2cec4148654dd15a74 | [] | no_license | argriffing/npmctree | 439f1e94ff876dfa09e3ab21ec7691c44b67b813 | 274f022baca5a4d3d4e74392ce79934ff3d21c32 | refs/heads/master | 2016-09-11T08:12:33.662556 | 2014-09-15T19:13:08 | 2014-09-15T19:13:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,409 | py | """
Joint state sampling algorithm for a Markov chain on a NetworkX tree graph.
"""
from __future__ import division, print_function, absolute_import
import random
import numpy as np
import networkx as nx
from npmctree import dynamic_fset_lhood, dynamic_lmap_lhood
from .util import normalized, weighted_choice
__all__ = [
'sample_history',
'sample_histories',
'sample_unconditional_history',
'sample_unconditional_histories',
]
def sample_history(T, edge_to_P, root,
        root_prior_distn1d, node_to_data_lmap):
    """
    Jointly sample states on a tree.
    This is called a history.

    Runs a backward (leaves-to-root) pass over the data to obtain per-node
    subtree partial likelihoods, then samples states from the root downward.
    Returns a node -> state dict, or None when the data has zero likelihood
    (see _sample_states_preprocessed).
    """
    # Backward pass: subtree partial likelihoods for every node.
    v_to_subtree_partial_likelihoods = dynamic_lmap_lhood._backward(
            T, edge_to_P, root, root_prior_distn1d, node_to_data_lmap)
    # Forward pass: draw the joint state assignment root-down.
    node_to_state = _sample_states_preprocessed(T, edge_to_P, root,
            v_to_subtree_partial_likelihoods)
    return node_to_state
def sample_histories(T, edge_to_P, root,
        root_prior_distn1d, node_to_data_lmap, nhistories):
    """Yield ``nhistories`` joint state samples (histories) of the tree.

    The backward pass over the data is computed once and shared by
    every sampled history.
    """
    partials = dynamic_lmap_lhood._backward(
            T, edge_to_P, root, root_prior_distn1d, node_to_data_lmap)
    for _ in range(nhistories):
        yield _sample_states_preprocessed(T, edge_to_P, root, partials)
def _sample_states_preprocessed(T, edge_to_P, root,
        v_to_subtree_partial_likelihoods):
    """
    Jointly sample states on a tree.
    This variant requires subtree partial likelihoods.

    Returns a node -> sampled-state dict, or None when the root partial
    likelihoods are all zero (no history is consistent with the data).
    """
    root_partial_likelihoods = v_to_subtree_partial_likelihoods[root]
    # Number of states, taken from the length of the root likelihood vector.
    n = root_partial_likelihoods.shape[0]
    if not root_partial_likelihoods.any():
        return None
    distn1d = normalized(root_partial_likelihoods)
    root_state = weighted_choice(n, p=distn1d)
    v_to_sampled_state = {root : root_state}
    # Walk the tree root-down so each parent is sampled before its child.
    for edge in nx.bfs_edges(T, root):
        va, vb = edge
        P = edge_to_P[edge]
        # For the relevant parent state,
        # compute an unnormalized distribution over child states.
        sa = v_to_sampled_state[va]
        # Construct conditional transition probabilities.
        sb_weights = P[sa] * v_to_subtree_partial_likelihoods[vb]
        # Sample the state.
        distn1d = normalized(sb_weights)
        v_to_sampled_state[vb] = weighted_choice(n, p=distn1d)
    return v_to_sampled_state
def sample_unconditional_history(T, edge_to_P, root, root_prior_distn1d):
    """
    No data is used in the sampling of this state history at nodes.

    The root state is drawn from the prior, and each child state from the
    transition-matrix row of its already-sampled parent state.
    """
    nstates = root_prior_distn1d.shape[0]
    node_to_state = {root : weighted_choice(nstates, p=root_prior_distn1d)}
    for edge in nx.bfs_edges(T, root):
        va, vb = edge
        P = edge_to_P[edge]
        sa = node_to_state[va]
        # P[sa] is the distribution of vb's state conditional on va's state.
        node_to_state[vb] = weighted_choice(nstates, p=P[sa])
    return node_to_state
def sample_unconditional_histories(T, edge_to_P, root,
        root_prior_distn1d, nhistories):
    """Lazily produce ``nhistories`` unconditional state histories.

    Unlike the conditional analog there is no shared pre-processing,
    so every history is drawn independently from scratch.
    """
    return (sample_unconditional_history(T, edge_to_P, root, root_prior_distn1d)
            for _ in range(nhistories))
| [
"argriffi@ncsu.edu"
] | argriffi@ncsu.edu |
b0dd601df311767b266472a4fb9bae1c30625292 | f18ca41c7aaa1da62ade3fb12ff69c2de863bc5f | /server/workers/dataprocessing/run_dataprocessing.py | 7f299c873debe81d77d589105ffae658b93b4248 | [
"MIT"
] | permissive | chreman/Headstart | 983111e03db2e69406f3c6feb6387b0757c6b62d | 5d8b956faac4389c649f3072b5ac55aaa01644c6 | refs/heads/master | 2021-10-21T04:50:03.608480 | 2021-10-14T13:02:53 | 2021-10-14T13:02:53 | 20,580,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | import os
import json
import redis
from dataprocessing.src.headstart import Dataprocessing
if __name__ == '__main__':
    # Redis connection settings come entirely from the environment,
    # so the worker can be configured per deployment.
    redis_config = {
        "host": os.getenv("REDIS_HOST"),
        "port": os.getenv("REDIS_PORT"),
        "db": os.getenv("REDIS_DB"),
        "password": os.getenv("REDIS_PASSWORD")
    }
    redis_store = redis.StrictRedis(**redis_config)
    # Worker wrapper around the R visualization layout script.
    dp = Dataprocessing("./other-scripts", "run_vis_layout.R",
                        redis_store=redis_store,
                        loglevel=os.environ.get("HEADSTART_LOGLEVEL", "INFO"))
    dp.run()
| [
"web@christopherkittel.eu"
] | web@christopherkittel.eu |
7025db40f2f96e1a274d3040dac948135d57aefc | 043e511436798e9aed96052baddac7a353ac6562 | /paintHouse.py | b1ca2afe5bc36ee0ad4dad8eecab0186121c949e | [] | no_license | bch6179/Pyn | 01e19f262cda6f7ee1627d41a829609bde153a93 | e718fcb6b83664d3d6413cf9b2bb4a875e62de9c | refs/heads/master | 2021-01-22T21:28:13.982722 | 2017-05-05T07:21:19 | 2017-05-05T07:21:19 | 85,434,828 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py | class Solution(object):
def paintHouse(self, costs):
    """Minimum total cost to paint all houses with 3 colors so that no two
    adjacent houses share a color.

    costs[i][j] is the cost of painting house i with color j (j in 0..2).
    Returns the minimum total, or -1 for a missing/empty cost matrix.
    Fix over the original: the caller's matrix is no longer mutated, and
    ``== None`` is replaced by the idiomatic identity test.
    """
    if costs is None or len(costs) == 0:
        return -1
    # prev[j]: cheapest cost of painting houses 0..i with house i colored j.
    prev = list(costs[0])
    for row in costs[1:]:
        prev = [
            row[0] + min(prev[1], prev[2]),
            row[1] + min(prev[0], prev[2]),
            row[2] + min(prev[0], prev[1]),
        ]
    return min(prev)
def paintHouseK(self, costs):
    """Minimum total cost to paint n houses with k colors, adjacent houses
    differing in color.  O(n*k) time via the two-smallest-previous-totals trick.

    Fixes over the original draft:
      * `k` is derived from the input instead of being an undefined name;
      * the `prevId` / `prevIdx` name mismatch is removed;
      * `1 << 31 -1` (which parses as 1 << 30) is replaced by a real infinity;
      * the conditional expression is parenthesized so only the
        previous-best term switches, not the whole sum;
      * the caller's matrix is no longer mutated.
    Returns -1 for a missing or empty cost matrix.
    """
    if costs is None or len(costs) == 0 or len(costs[0]) == 0:
        return -1
    k = len(costs[0])
    # Cheapest and second-cheapest totals for the previous house, plus the
    # color index that achieved the cheapest total (-1 before any house).
    prev_min, prev_sec, prev_idx = 0, 0, -1
    for row in costs:
        cur_min = cur_sec = float('inf')
        cur_idx = -1
        for j in range(k):
            # Reuse the previous minimum unless it was achieved with color j.
            total = row[j] + (prev_sec if j == prev_idx else prev_min)
            if total < cur_min:
                cur_sec = cur_min
                cur_min = total
                cur_idx = j
            elif total < cur_sec:
                cur_sec = total
        prev_min, prev_sec, prev_idx = cur_min, cur_sec, cur_idx
    return prev_min
# Ad-hoc smoke check; print() keeps this runnable on Python 3 as well
# (the original Python 2 print statement is a syntax error there).
s = Solution()
print(s.paintHouse([[1,2,3],[1,2,3],[1,2,3]]))
"bch6179@gmail.com"
] | bch6179@gmail.com |
cdff1430959d3feb6893f479095f9de2ba77ae53 | e3dd0254121b8c51b30fad8f494207d7776af5fe | /BOJ/01000/01075.py | 27bfbc64e870d5d8ca06493df02cbf341b47e3bd | [] | no_license | Has3ong/Algorithm | 609d057b10641466b323ce8c4f6e792de1ab5d0b | 83390f2760e54b9b9cf608f3a680f38d5bb5cddc | refs/heads/master | 2020-06-15T03:15:02.908185 | 2019-09-15T16:17:14 | 2019-09-15T16:17:14 | 195,190,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | def solution():
# Read the base number N and the divisor F from stdin.
N = int(input())
F = int(input())
# Zero out the last two digits of N (round down to a multiple of 100).
N = (N // 100) * 100
# Scan the hundred candidates N..N+99 for the first one divisible by F.
for i in range(100):
    M = N + i
    if M % F == 0:
        break
# Only the last two digits of the answer are printed.
print(str(M)[-2:])
solution()
"khsh5592@naver.com"
] | khsh5592@naver.com |
5b58c08e3876a61c430a35961b7173b43b21a8a3 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_304/ch85_2019_06_06_19_36_38_339098.py | 1a496634416c079b1b60dd94e2c6d2b3b6661571 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | with open ('macacos-me-mordam.txt', 'r') as arquivo:
conteudo=arquivo.read()
# Uppercase once so the comparison below is case-insensitive.
maiusculo=conteudo.upper()
conta_banana=maiusculo.split()
contador=0
# Count whitespace-separated tokens that are exactly 'BANANA'.
for e in conta_banana:
    if e == 'BANANA':
        contador+=1
print (contador)
| [
"you@example.com"
] | you@example.com |
c35f2ecffbddad9a986f066f6f1b2c9ab404aeda | 0be0bdd8574eda8ec6f0ff340e88bd8677618bb3 | /s01-create-project/meal_options/meal_options/app.py | 01ffc0ddd4b02021af961c76f059fa482d052a55 | [] | no_license | genzj/flask-restful-api-course | 1e97396d6fea50f34922530dc3777393178995c0 | f9ab1bc7da724019dfacc5b94536ec5e8b6afad7 | refs/heads/master | 2023-05-11T06:19:52.307109 | 2022-12-20T20:35:57 | 2022-12-20T20:35:57 | 165,891,710 | 1 | 1 | null | 2023-05-02T00:19:49 | 2019-01-15T17:12:22 | Python | UTF-8 | Python | false | false | 140 | py | from . import create_app
# Application instance built by the package-level factory.
app = create_app()


@app.route("/")
def index():
    """Root endpoint: logs a warning-level greeting and returns plain text."""
    app.logger.warning("hello world")
    return "Hello World!"
| [
"zj0512@gmail.com"
] | zj0512@gmail.com |
902836baed5634be041c938b400c0b355c6b5b0f | 632e8ed762f9f694e8f72d4d65303b5246a11217 | /py/word_count.py | e66f432c740864a91e65bfe0e842bbf04ea07f34 | [] | no_license | yiyusheng/django | 36537bedf7efd2db3e41809c898cdabe2af6c381 | a8a79ef0323cd9234cec83735618940f63dfc2a4 | refs/heads/master | 2022-11-19T11:36:30.868697 | 2022-11-06T04:01:50 | 2022-11-06T04:01:50 | 97,066,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,404 | py | # -*- coding: utf-8 -*-
import pymysql,warnings,jieba,sys
import pandas as pd
from datetime import datetime,timedelta
from collections import Counter
# mysql connect
def sqlcon():
    """Open and return a new connection to the local `prichat` MySQL database.

    NOTE(review): credentials are hard-coded; consider moving them to
    configuration or environment variables.
    """
    conn = pymysql.connect(
        host = "localhost",
        user = "root",
        passwd = "qwer1234",
        charset = "utf8",
        database = 'prichat'
    )
    return conn
# get data from table chat_logs for one hour
def get_chats(sc,ts_now,all=False):
    """Fetch chat message texts from `chat_logs`.

    sc: open database connection (see sqlcon()); ts_now: datetime whose hour
    bucket is selected when `all` is False; all: fetch every message instead.
    Returns a list of content strings.

    NOTE(review): `all` shadows the builtin; renaming would break keyword
    callers.  Also, with all=True the execute below still receives a
    parameter the query has no placeholder for -- confirm that code path.
    """
    cursor = sc.cursor()
    if all:
        sql = "SELECT content FROM chat_logs"
    else:
        # %% escapes to a literal % inside the SQL date_format pattern;
        # messages are bucketed by the hour starting at ts_now.
        sql = "SELECT content FROM chat_logs WHERE date_format(time,'%%Y-%%m-%%d %%H:00:00')=%s"
    cursor.execute(sql,(ts_now.strftime('%Y-%m-%d %H:%M:%S')))
    data = cursor.fetchall()
    data = [d[0] for d in data]
    return data
# parse these data with jieba
def data_parse(data):
    """Tokenize every chat message with jieba and return one flat token list.

    Equivalent to the original two-step build (per-message token lists,
    then a flattening pass), but done in a single comprehension without
    the intermediate list-of-lists.
    """
    return [token
            for message in data
            for token in jieba.cut(message, cut_all=False)]
# store them to table word_count and word_count_hourly
def word_insert(sc,word_count,ts_now,len_logs):
    """Persist word counts for one hour bucket.

    Writes one row per word into word_count_hourly (weighted_count =
    count/len_logs rounded to 4 dp) and upserts a running total per word
    into word_count.  sc: open DB connection; word_count: word -> count
    mapping; ts_now: hour bucket timestamp; len_logs: messages in the hour.
    """
    cursor = sc.cursor()
    df = pd.DataFrame(list(word_count.items()),columns=['word','count'])
    df['weighted_count'] = df['count']/len_logs
    df['weighted_count'] = df['weighted_count'].round(4)
    df['time'] = ts_now.strftime('%Y-%m-%d %H:%M:%S')
    # for hourly count
    sql = "INSERT INTO word_count_hourly(time,word,count,weighted_count) VALUES(%s,%s,%s,%s)"
    cursor.executemany(sql,df[['time','word','count','weighted_count']].values.tolist())
    cursor.connection.commit()
    # for total count
    # ON DUPLICATE KEY: existing words accumulate, new words are inserted.
    sql = "INSERT INTO word_count(time,word,count) VALUES(%s,%s,%s) ON DUPLICATE KEY UPDATE count=count+%s"
    for i in range(len(df)):
        cursor.execute(sql,(df['time'][i],df['word'][i],str(df['count'][i]),str(df['count'][i])))
    cursor.connection.commit()
if __name__=='__main__':
    argv = sys.argv
    #ts_now = datetime.strptime(argv[1],'%Y-%m-%dT%H:%M:%S')
    # Optional CLI argument: an explicit hour to process (ISO-like format).
    if len(argv)==2:
        ts_now = datetime.strptime(argv[1],'%Y-%m-%dT%H:%M:%S')
        # NOTE(review): Python 2 print statement -- this module is Py2-only.
        print 'special time:%s' %(ts_now)
    else:
        # for last hour
        ts_now = datetime.utcnow().replace(minute=0,second=0,microsecond=0)-timedelta(hours=1)
    # Pipeline: fetch chats -> tokenize -> count -> persist.
    sc = sqlcon()
    data = get_chats(sc,ts_now)
    word_list = data_parse(data)
    word_count = Counter(word_list)
    word_insert(sc,word_count,ts_now,len(data))
| [
"yiyusheng.hust@gmail.com"
] | yiyusheng.hust@gmail.com |
3a4b99682fa6c9d529af4e44480f812cef0d3781 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Fisher/trend_ConstantTrend/cycle_5/ar_12/test_artificial_128_Fisher_ConstantTrend_5_12_0.py | 825a6bdaa5c4a8acc661aa02e67ba5736b2b433f | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 270 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# Synthetic benchmark: 128 daily points, constant trend, cycle length 5,
# Fisher transform, AR order 12, no noise (sigma=0), no exogenous variables.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 5, transform = "Fisher", sigma = 0.0, exog_count = 0, ar_order = 12);
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
2b3de044fbd1ca7140aa5bd56bb39b86532524cb | b121b4135f0edf0e39c1ae7343c7df19f56a077f | /prototypes/decorators/Fibonacci_Cache_decorator_Class.py | ddb98fbbb340a1ac1a4c0dd94506ff2aa344a4df | [] | no_license | MPIBGC-TEE/bgc-md | 25379c03d2333481bd385211f49aff6351e5dd05 | 8912a26d1b7e404ed3ebee4d4799a3518f507756 | refs/heads/master | 2021-05-08T19:07:46.930394 | 2020-10-21T12:08:53 | 2020-10-21T12:08:53 | 119,548,100 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | class Memoize:
def __init__(self,fn):
    """Wrap callable `fn`; results are cached in `self.memo`, keyed by the args tuple."""
    self.fn=fn
    self.memo={}
def __call__(self, *args, **kwargs):
    """Return the cached result for these arguments, computing it on first use.

    Generalized over the original: keyword arguments are now supported.
    Positional-only calls keep the original cache-key shape (the bare
    ``args`` tuple), so existing callers and their cached entries behave
    exactly as before.  All argument values must be hashable.
    """
    key = (args, frozenset(kwargs.items())) if kwargs else args
    if key not in self.memo:
        self.memo[key] = self.fn(*args, **kwargs)
    return self.memo[key]
@Memoize
def fib(n):
    """Return the n-th Fibonacci number (fib(0) = 0, fib(1) = 1)."""
    if n in (0, 1):
        return n
    return fib(n - 1) + fib(n - 2)


print(fib(36))
| [
"markus.mueller.1.g@googlemail.com"
] | markus.mueller.1.g@googlemail.com |
a039f6918e1185e2714bffca130845312af93bae | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2716/60712/321105.py | e2f159e0ba7662e623164f51e9759d7ba83ab298 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | l=[]
for i in range(3):
l.append(input())
if l=='['[', ' "//",', ' "/ "']':
print(3)
elif l==[[0, 0]]:
print(0)
else:
print(l) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
8d1385879e292e12bcc56f1d11b3835812fd8220 | cbbce6a21a57c6a638fc0144f2e4dd9583adb30f | /Estrutura_De_Decisao/estruturadedecisao-13.py | f65664112836ff48a86a6bf5972fb42292eba44c | [] | no_license | gaijinctfx/PythonExercicios | 5d0e0decfe8122b5d07713b33aea66b736700554 | 1e0cde4f27b14ac192e37da210bad3f7023437c7 | refs/heads/master | 2022-05-11T21:57:17.184820 | 2017-06-08T03:21:49 | 2017-06-08T03:21:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | # author: ZumbiPy
# E-mail: zumbipy@gmail.com
# Exercicio do site http://wiki.python.org.br/EstruturaDeDecisao
# Para execurta o programa on line entra no link a baixo:
# https://repl.it/I2Ee/0
"""
13 - Faça um Programa que leia um número e exiba o dia correspondente
da semana. (1-Domingo, 2- Segunda, etc.), se digitar outro valor
deve aparecer valor inválido.
"""
# ================================================================================
# Variáveis do programa
# ================================================================================
# Entrada de Dados.
dia_da_semana = int(input("Digite Um numero de 1 ao 7: "))
# ================================================================================
# Logica do programa
# ================================================================================
# Comparações.
# A lookup table replaces the 14-line if/elif chain (output is unchanged):
# index 0 -> Domingo, ..., index 6 -> Sabado.
dias_da_semana = ("Domingo", "Segunda", "Terça", "Quarta", "Quinta", "Sexta", "Sabado")
if 1 <= dia_da_semana <= 7:
    print("Numero {} correspondente á {}".format(dia_da_semana, dias_da_semana[dia_da_semana - 1]))
else:
    print("Valor Invalido.")
| [
"zumbipy@gmail.com"
] | zumbipy@gmail.com |
41e5ce490aacef1706d7e4fc24123d2a67a90fa1 | 99259216f11b15ec60446b4a141b3592a35560ce | /wex-python-api/ibmwex/models/nlp_document.py | bf8459f2bec05fb8a89b69ac55ae507867f2e20b | [] | no_license | adam725417/Walsin | 296ba868f0837077abff93e4f236c6ee50917c06 | 7fbefb9bb5064dabccf4a7e2bf49d2a43e0f66e9 | refs/heads/master | 2020-04-12T14:14:07.607675 | 2019-03-05T01:54:03 | 2019-03-05T01:54:03 | 162,546,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,784 | py | # coding: utf-8
"""
WEX REST APIs
Authentication methods - Basic Auth - JSON Web Token - [POST /api/v1/usermgmt/login](#!/User/signinUser) - [POST /api/v1/usermgmt/logout](#!/User/doLogout) - Python client sample [Download](/docs/wex-python-api.zip)
OpenAPI spec version: 12.0.2.417
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class NLPDocument(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """


    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'fields': 'object',
        'metadata': 'object'
    }

    attribute_map = {
        'fields': 'fields',
        'metadata': 'metadata'
    }

    def __init__(self, fields=None, metadata=None):
        """
        NLPDocument - a model defined in Swagger
        """

        self._fields = None
        self._metadata = None

        # `fields` is effectively required: assigning through the property
        # raises ValueError when it is None.  `metadata` stays None unless given.
        self.fields = fields
        if metadata is not None:
            self.metadata = metadata

    @property
    def fields(self):
        """
        Gets the fields of this NLPDocument.

        :return: The fields of this NLPDocument.
        :rtype: object
        """
        return self._fields

    @fields.setter
    def fields(self, fields):
        """
        Sets the fields of this NLPDocument.

        :param fields: The fields of this NLPDocument.
        :type: object
        """
        if fields is None:
            raise ValueError("Invalid value for `fields`, must not be `None`")

        self._fields = fields

    @property
    def metadata(self):
        """
        Gets the metadata of this NLPDocument.

        :return: The metadata of this NLPDocument.
        :rtype: object
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this NLPDocument.

        :param metadata: The metadata of this NLPDocument.
        :type: object
        """

        self._metadata = metadata

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively serialize nested models (anything exposing to_dict),
        # including models held inside lists and dict values.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, NLPDocument):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"adamtp_chen@walsin.com"
] | adamtp_chen@walsin.com |
0979f9239fd35acdc47af14483eb5da3f0e0521b | 6ea94d75b6e48952c1df2bda719a886f638ed479 | /devel/lib/python2.7/dist-packages/object_recognition_core/__init__.py | 69401447b8ce90354d2b13050999f5c8523ae0b6 | [] | no_license | lievech/ork_ws | 634e26355503c69b76df7fca41402ea43c228f49 | e828846b962974a038be08a5ce39601b692d4045 | refs/heads/master | 2020-08-02T20:19:43.109158 | 2019-09-28T11:56:56 | 2019-09-28T11:56:56 | 211,493,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | # -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
# Paths listed here (semicolon separated) are prepended to sys.path so the
# in-source python package extends the installed one.
__extended_path = "/home/lhn/ork_ws/src/ork_core/python".split(";")
for p in reversed(__extended_path):
    sys_path.insert(0, p)
    del p
del sys_path
# Declare this package's __path__ as extendable across install/source spaces.
__path__ = extend_path(__path__, __name__)
del extend_path
# Collect the source-space module file or package __init__ for each path...
__execfiles = []
for p in __extended_path:
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
    del src_init_file
    del p
del os_path
del __extended_path
# ...and execute them in this module's namespace.  Every helper name is
# deleted afterwards to keep the symbol table clean.
for __execfile in __execfiles:
    with open(__execfile, 'r') as __fh:
        exec(__fh.read())
    del __fh
    del __execfile
del __execfiles
| [
"2328187416@qq.com"
] | 2328187416@qq.com |
3e0e1ee40d064048f3b637b4d1b098ac730d61bc | 3e660e22783e62f19e9b41d28e843158df5bd6ef | /script.me.syncsmashingfromgithub/smashingfavourites/scripts/testscripts/refreshpvrinfo/disableiptvtest1.py | 0b6379f6ab0f50cde1e4ee88d0ded24ceaa8d61e | [] | no_license | monthou66/repository.smashingfavourites | a9603906236000d2424d2283b50130c7a6103966 | f712e2e4715a286ff6bff304ca30bf3ddfaa112f | refs/heads/master | 2020-04-09T12:14:34.470077 | 2018-12-04T10:56:45 | 2018-12-04T10:56:45 | 160,341,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | # -*- coding: utf-8 -*-
import xbmc
def printstar():
    # Visual separator for the Kodi log (Python 2 print statements).
    print "***************************************************************************************"
    print "****************************************************************************************"
printstar()
print "test1.py has just been started"
printstar()
xbmc.executebuiltin('Notification(test1.py, started)')
# Disable the IPTV Simple PVR addon via JSON-RPC if it is installed.
# NOTE(review): indentation was lost in transit; it is assumed only the
# JSON-RPC call and the sleep are conditional on the addon being present.
if xbmc.getCondVisibility('System.HasAddon(pvr.iptvsimple)'):
    xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Addons.SetAddonEnabled","id":8,"params":{"addonid":"pvr.iptvsimple","enabled":false}}')
    xbmc.sleep(300)
# Navigate window 10021 by simulated key presses, then click control 11.
xbmc.executebuiltin("ActivateWindow(10021)")
xbmc.executebuiltin( "XBMC.Action(Right)" )
xbmc.executebuiltin( "XBMC.Action(Down)" )
xbmc.executebuiltin( "XBMC.Action(Down)" )
xbmc.executebuiltin( "XBMC.Action(Down)" )
xbmc.executebuiltin( "XBMC.Action(Down)" )
xbmc.executebuiltin( "XBMC.Action(Down)" )
xbmc.executebuiltin( "XBMC.Action(Down)" )
xbmc.executebuiltin( "XBMC.Action(Down)" )
xbmc.executebuiltin( "XBMC.Action(Select)" )
xbmc.executebuiltin('SendClick(11)')
# xbmc.sleep(300)
# xbmc.executebuiltin( "XBMC.Action(Back)" )
xbmc.executebuiltin("ActivateWindow(Home)")
| [
"davemullane@gmail.com"
] | davemullane@gmail.com |
67eac9c88b11d3565f79cf41ffc9d127d3f4b194 | df9187f1c78cf61075fa23c27432adef0cce285a | /iteratorss/generators_example1.py | 29c46365c6fa1acc2622cad63f8b2dd8c505b5c8 | [] | no_license | santoshr1016/techmonthppt | 6a17c6c7fc97ef7faad9264ed4f89d455e4db100 | cc5e6bce71b3495a6abd2064f74ac8be8c973820 | refs/heads/master | 2020-03-07T00:17:14.395984 | 2018-03-28T14:56:10 | 2018-03-28T14:56:10 | 127,153,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | def gen_function():
# Generator body: execution suspends at each `yield` and resumes where it
# left off on the next next() call, so each print runs just before its
# value is handed out.
n = 1
print('This is printed first')
yield n
n += 1
print('This is printed second')
yield n
n += 1
print('This is printed at last')
yield n
gen = gen_function()
# Each next() resumes the generator: a message is printed, then 1, 2, 3
# are returned in turn.
print(next(gen))
print(next(gen))
print(next(gen))
# using the for loop
# for item in gen_function():
#     print(item)
| [
"santy1016@gmail.com"
] | santy1016@gmail.com |
43aba475c1cfef902574ad46c9157de811b4527b | deba1fb8df5fa58563b172546ee06d3c69fb59a8 | /shop_asistant_dj/shop_asistant_dj/apps/purchase/migrations/0005_auto_20200814_1432.py | f954ccf8881328d75cde6ce92c8eeded2d69462d | [] | no_license | korid24/shop_list_via_django | 52d71873fe546ab26a253438ec349c5034211122 | 9426f9305697754c10b402ac7b6e858974d14f96 | refs/heads/master | 2023-01-01T09:30:05.339463 | 2020-10-22T10:03:26 | 2020-10-22T10:03:26 | 289,884,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # Generated by Django 3.1 on 2020-08-14 07:32
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: gives ``creation_time`` on both Purchase
    and PurchasesList a ``timezone.now`` default and a verbose name."""

    dependencies = [
        ('purchase', '0004_auto_20200814_1203'),
    ]

    operations = [
        migrations.AlterField(
            model_name='purchase',
            name='creation_time',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Creation time'),
        ),
        migrations.AlterField(
            model_name='purchaseslist',
            name='creation_time',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Creation time'),
        ),
    ]
| [
"korid24.dev@gmail.com"
] | korid24.dev@gmail.com |
7d65ca613df3d3dd46ccacae4b6e90189ce005c9 | f3d38d0e1d50234ce5f17948361a50090ea8cddf | /CodeUp/Python 기초 100제/6048번 ; 정수 2개 입력받아 비교하기1.py | 7bc89b85386b03f9c73d7d64141d73bdd23be07a | [] | no_license | bright-night-sky/algorithm_study | 967c512040c183d56c5cd923912a5e8f1c584546 | 8fd46644129e92137a62db657187b9b707d06985 | refs/heads/main | 2023-08-01T10:27:33.857897 | 2021-10-04T14:36:21 | 2021-10-04T14:36:21 | 323,322,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # https://codeup.kr/problem.php?id=6048
# readline을 사용하기 위해 import합니다.
from sys import stdin
# Read one line holding two integers separated by a single space and
# convert both to int.
a, b = map(int, stdin.readline().split(' '))
# The comparison already evaluates to the boolean that must be printed:
# True when a is strictly smaller than b, otherwise False.
print(a < b)
"bright_night_sky@naver.com"
] | bright_night_sky@naver.com |
d0b3d534982c31fb50ebe286974255e9b7a45d14 | 7872b02b8f066fa228bbfa2dd6fcfb5a9ee49dc7 | /tests/dump_tests/module_tests/acl_test.py | 105b948a53f44a84fcc38b9e261f2194e140a4fe | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | stephenxs/sonic-utilities | 1d6168206140c5b790cfc1a70cea6a8288040cb0 | c2bc150a6a05c97362d540c874deff81fad6f870 | refs/heads/master | 2023-03-16T05:55:37.688189 | 2023-03-06T18:56:51 | 2023-03-06T18:56:51 | 187,989,457 | 0 | 1 | NOASSERTION | 2023-03-09T13:21:43 | 2019-05-22T07:48:38 | Python | UTF-8 | Python | false | false | 6,227 | py | import os
import pytest
from deepdiff import DeepDiff
from dump.helper import create_template_dict, sort_lists, populate_mock
from dump.plugins.acl_table import Acl_Table
from dump.plugins.acl_rule import Acl_Rule
from dump.match_infra import MatchEngine, ConnectionPool
from swsscommon.swsscommon import SonicV2Connector
from utilities_common.constants import DEFAULT_NAMESPACE
# Location for dedicated db's used for UT
# Directory layout: this file lives under tests/dump_tests/module_tests and
# the mock database dumps live under tests/dump_input/acl.
module_tests_path = os.path.dirname(__file__)
dump_tests_path = os.path.join(module_tests_path, "../")
tests_path = os.path.join(dump_tests_path, "../")
dump_test_input = os.path.join(tests_path, "dump_input")
port_files_path = os.path.join(dump_test_input, "acl")

# Define the mock files to read from
dedicated_dbs = {}
dedicated_dbs['CONFIG_DB'] = os.path.join(port_files_path, "config_db.json")
dedicated_dbs['COUNTERS_DB'] = os.path.join(port_files_path, "counters_db.json")
dedicated_dbs['ASIC_DB'] = os.path.join(port_files_path, "asic_db.json")
@pytest.fixture(scope="class", autouse=True)
def match_engine():
    """Class-scoped fixture yielding a MatchEngine backed by the mock JSON DBs."""
    os.environ["VERBOSE"] = "1"

    # Monkey Patch the SonicV2Connector Object
    from ...mock_tables import dbconnector

    db = SonicV2Connector()

    # populate the db with mock data
    db_names = list(dedicated_dbs.keys())
    try:
        populate_mock(db, db_names, dedicated_dbs)
    except Exception as e:
        assert False, "Mock initialization failed: " + str(e)

    # Initialize connection pool
    conn_pool = ConnectionPool()
    conn_pool.fill(DEFAULT_NAMESPACE, db, db_names)

    # Initialize match_engine
    match_engine = MatchEngine(conn_pool)
    yield match_engine
    # Teardown: restore quiet logging.
    os.environ["VERBOSE"] = "0"
@pytest.mark.usefixtures("match_engine")
class TestAclTableModule:
    """Dump-module tests for Acl_Table: each scenario compares the keys the
    module collects from CONFIG_DB/ASIC_DB against a hand-built template."""

    def test_basic(self, match_engine):
        """
        Scenario: When the basic config is properly applied and propagated
        """
        params = {Acl_Table.ARG_NAME: "DATAACL", "namespace": ""}
        m_acl_table = Acl_Table(match_engine)
        returned = m_acl_table.execute(params)
        expect = create_template_dict(dbs=["CONFIG_DB", "ASIC_DB"])
        expect["CONFIG_DB"]["keys"].append("ACL_TABLE|DATAACL")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE:oid:0x7000000000600")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER:oid:0xc000000000601")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER:oid:0xc000000000602")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP:oid:0xb0000000005f5")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP:oid:0xb0000000005f7")
        # Sort nested lists first so the deep diff is order-insensitive.
        ddiff = DeepDiff(sort_lists(returned), sort_lists(expect))
        assert not ddiff, ddiff

    def test_no_counter_mapping(self, match_engine):
        """
        Scenario: When there is no ACL_COUNTER_RULE_MAP mapping for rule
        """
        params = {Acl_Table.ARG_NAME: "DATAACL1", "namespace": ""}
        m_acl_table = Acl_Table(match_engine)
        returned = m_acl_table.execute(params)
        # Only the CONFIG_DB entry is expected; nothing propagated to ASIC_DB.
        expect = create_template_dict(dbs=["CONFIG_DB", "ASIC_DB"])
        expect["CONFIG_DB"]["keys"].append("ACL_TABLE|DATAACL1")
        ddiff = DeepDiff(sort_lists(returned), sort_lists(expect))
        assert not ddiff, ddiff

    def test_with_table_type(self, match_engine):
        """
        Scenario: When there is ACL_TABLE_TYPE configured for this table
        """
        params = {Acl_Table.ARG_NAME: "DATAACL2", "namespace": ""}
        m_acl_table = Acl_Table(match_engine)
        returned = m_acl_table.execute(params)
        expect = create_template_dict(dbs=["CONFIG_DB", "ASIC_DB"])
        expect["CONFIG_DB"]["keys"].append("ACL_TABLE|DATAACL2")
        expect["CONFIG_DB"]["keys"].append("ACL_TABLE_TYPE|MY_TYPE")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE:oid:0x7100000000600")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER:oid:0xc100000000601")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER:oid:0xc100000000602")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP:oid:0xb0000000005f5")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP:oid:0xb0000000005f7")
        ddiff = DeepDiff(sort_lists(returned), sort_lists(expect))
        assert not ddiff, ddiff
@pytest.mark.usefixtures("match_engine")
class TestAclRuleModule:
    """Dump-module tests for Acl_Rule: verify the CONFIG_DB/ASIC_DB keys
    collected for a rule (entry, counter and, when configured, ranges)."""

    def test_basic(self, match_engine):
        """
        Scenario: When the config is properly applied and propagated
        """
        params = {Acl_Rule.ARG_NAME: "DATAACL|R0", "namespace": ""}
        m_acl_rule = Acl_Rule(match_engine)
        returned = m_acl_rule.execute(params)
        expect = create_template_dict(dbs=["CONFIG_DB", "ASIC_DB"])
        expect["CONFIG_DB"]["keys"].append("ACL_RULE|DATAACL|R0")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_COUNTER:oid:0x9000000000606")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY:oid:0x8000000000609")
        ddiff = DeepDiff(sort_lists(returned), sort_lists(expect))
        assert not ddiff, ddiff

    def test_with_ranges(self, match_engine):
        """
        Scenario: When ACL rule has range configuration
        """
        params = {Acl_Rule.ARG_NAME: "DATAACL2|R0", "namespace": ""}
        m_acl_rule = Acl_Rule(match_engine)
        returned = m_acl_rule.execute(params)
        # Two extra ACL_RANGE objects are expected for the port-range match.
        expect = create_template_dict(dbs=["CONFIG_DB", "ASIC_DB"])
        expect["CONFIG_DB"]["keys"].append("ACL_RULE|DATAACL2|R0")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_COUNTER:oid:0x9100000000606")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY:oid:0x8100000000609")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_RANGE:oid:0xa100000000607")
        expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ACL_RANGE:oid:0xa100000000608")
        ddiff = DeepDiff(sort_lists(returned), sort_lists(expect))
        assert not ddiff, ddiff
| [
"noreply@github.com"
] | stephenxs.noreply@github.com |
a6df023df1316eb45efd17d0843f1cfed8d86f28 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02412/s889541613.py | 889b2dbcdc586cb5aac3f12cd5a26d0b3e20daca | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | while True:
i = input().split()
n, x = map(int, i)
if n == 0 and x == 0:
break
count = 0
for a in range(1, n+1):
for b in range(a+1, n+1):
for c in range(b+1, n+1):
if a+b+c == x:
count += 1
print(count) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
20cb167f243d0a2395152f20be419b70fa1a0efd | 8fb16223e667e6bf35e3131ba6ed6bc5e0862fd1 | /src/util/constant.py | eeb5d72b7be93835aeb592492eaeff7f46afdbff | [] | no_license | ydup/robot-personal | 22f07738cde2d84d02d97255aa7c0c2d38537eaf | c7b16091bfbe280d1d92a38a46f7d5bbad55e5db | refs/heads/master | 2020-12-26T16:41:08.208720 | 2020-01-31T17:04:54 | 2020-01-31T17:04:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,915 | py | import sys
import os
import random
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
BASE_PATH = os.path.split(rootPath)[0]
sys.path.append(BASE_PATH)
#### Redis Key Begin
# 每个城市单独保存一个key做集合,订阅的用户在集合中
USE_REDIS = False
# 当前所有疫情数据,类型:list
STATE_NCOV_INFO = 'state_ncov_info'
# 所有有疫情的城市集合
ALL_AREA_KEY = 'all_area'
# 标记为,标记数据是有更新
SHOULD_UPDATE = 'should_update'
# 需要推送更新的数据
UPDATE_CITY = 'update_city'
# 当前已有订阅的城市集合,类型set
ORDER_KEY = 'order_area'
# 用户关注的群聊ID
USER_FOCUS_GROUP = 'user_focus_group'
# 用户关注的群聊名称
USER_FOCUS_GROUP_NAME = 'user_focus_group_name'
#### Redis Key End
### Reg Pattern Begin
UN_REGIST_PATTERN = '^取关|取消(关注)?.+'
UN_REGIST_PATTERN2 = '^取关|取消(关注)?'
### REG PAttern End
BASE_DIR = os.path.join(BASE_PATH, 'resource')
DATA_DIR = os.path.join(BASE_DIR, 'data')
# for localhost redis
REDIS_HOST = '127.0.0.1'
## for docker redis
REDIS_HOST_DOCKER = 'redis'
LOGGING_FORMAT = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'
AREA_TAIL = '(自治+)|省|市|县|区|镇'
FIRST_NCOV_INFO = '{}目前有确诊病例{}例,死亡病例{}例,治愈病例{}例。'
FIRST_NCOV_INFO2 = '{}目前有确诊病例{}例,死亡病例{}例,治愈病例{}例。'
INFO1 = '\n向所有奋斗在抗击疫情一线的工作人员、志愿者致敬!'
INFO2 = '\nfeiyan.help,病毒无情,但人间有爱。'
INFO3 = '\n疫情严峻,请您尽量减少外出,避免出入公共场所'
INFO4 = '\n为了保证您能持续最新的疫情消息,根据WX的规则,建议您偶尔回复我一下~'
INFO5 = '\n全部数据来源于腾讯实时疫情追踪平台:https://news.qq.com//zt2020/page/feiyan.htm'
INFO6 = '\n我们是公益组织wuhan.support,网址 https://feiyan.help'
INFO7 = '\n这里是面向疫区内外民众和医疗机构的多维度信息整合平台,https://feiyan.help'
INFO8 = '\nhttps://feiyan.help,支持武汉,我们在一起。'
INFO9 = '\n开源地址:https://github.com/wuhan-support,支持武汉,我们在一起。'
INFO10 = '\n查看更多信息可以戳这里,https://feiyan.help。'
INFO11 = '\n这是一个为了避免微信阻塞消息的随机小尾巴...'
INFO12 = '\n众志成城,抵御疫情,武汉加油!'
INFO13 = '\nhttps://feiyan.help,筑牢抵御疫情蔓延的一道屏障'
INFO_TAILS = [INFO1, INFO2, INFO3, INFO4, INFO5, INFO6, INFO7, INFO8, INFO9, INFO10, INFO11, INFO12, INFO13]
UPDATE_NCOV_INFO = '{}有数据更新,新增确诊病例{}例,目前共有确诊病例{}例,死亡病例{}例,治愈病例{}例。'
UPDATE_NCOV_INFO_ALL = '{}有数据更新,新增确诊病例{}例,疑似病例{}例,目前共有确诊病例{}例,疑似病例{}例,死亡病例{}例,治愈病例{}例。'
NO_NCOV_INFO = '{}暂无疫情信息,请检查地区名称是否正确。'
INFO_TAIL = "若{}等地区数据有更新,我会在第一时间通知您!您也可以通过发送 '取消+地区名'取消关注该地区,比如'取消{}','取消全部'。"
INFO_TAIL_ALL = "若全国的数据有更新,我会在第一时间通知您!您也可以通过发送'取消全部'取消对全部数据的关注。"
FOCUS_TAIL = "如果该群转发的新闻、分享存在谣言,将会自动发送辟谣链接!您也可以通过发送'停止辟谣+群名'取消对该群的谣言检查。"
CHAOYANG_INFO = '您的订阅"朝阳"有些模糊,如果您想订阅北京朝阳区,请输入订阅朝阳区,如果想订阅辽宁省朝阳市,请输入订阅朝阳市'
TIME_SPLIT = 60 * 3
SHORT_TIME_SPLIT = 60 * 5
LONG_TIME_SPLIT = 60 * 60
SEND_SPLIT = random.random() * 10
SEND_SPLIT_SHORT = random.random() * 5
HELP_CONTENT = "您好!这是微信疫情信息小助手(非官方)!我有以下功能:\n1.若好友向您发送 订阅/取消+地区名 关注/取消该地区疫情并实时向该好友推送;" \
"\n2.您向文件传输助手发送辟谣+群名,比如\"辟谣家族群\",将对该群的新闻长文、链接分享自动进行辟谣,使用停止辟谣+群名停止该功能。发送\"CX\"可查询已关注的群聊。" \
"\n以上所有数据来自腾讯\"疫情实时追踪\"平台,链接:https://news.qq.com//zt2020/page/feiyan.htm"
GROUP_CONTENT_HELP = "您对这些群启用了辟谣功能:{}。若发现漏掉了一些群,请将该群保存到通讯录再重新发送辟谣+群名。"
NO_GROUP_CONTENT_HELP = "您目前没有对任何群开启辟谣功能。若发现有遗漏,请将该群保存到通讯录再重新发送辟谣+群名。"
FILE_HELPER = 'filehelper'
ONLINE_TEXT = 'Hello, 微信疫情信息小助手(自动机器人)又上线啦'
| [
"maicius@outlook.com"
] | maicius@outlook.com |
6096776bf8dd21fe58ce88a2c47d00d6451aff58 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/purview/azure-mgmt-purview/generated_samples/accounts_list_by_resource_group.py | b8ffb87223d71e319f8cff856c97d022bb768056 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,586 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.purview import PurviewManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-purview
# USAGE
python accounts_list_by_resource_group.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = PurviewManagementClient(
credential=DefaultAzureCredential(),
subscription_id="34adfa4f-cedf-4dc0-ba29-b6d1a69ab345",
)
response = client.accounts.list_by_resource_group(
resource_group_name="SampleResourceGroup",
)
for item in response:
print(item)
# x-ms-original-file: specification/purview/resource-manager/Microsoft.Purview/stable/2021-07-01/examples/Accounts_ListByResourceGroup.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
53010e1bfd118bc7c2ed2950dc17576e508e457a | 2545624bbbf982aa6243acf8b0cb9f7eaef155d6 | /2019/round_1a/rhyme.py | 28194d837671b486030f48e3044cdf0851600a4f | [] | no_license | dprgarner/codejam | 9f420003fb48c2155bd54942803781a095e984d1 | d7e1134fe3fe850b419aa675260c4ced630731d0 | refs/heads/master | 2021-07-12T05:36:08.465603 | 2021-07-03T12:37:46 | 2021-07-03T12:37:46 | 87,791,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,168 | py | """
Codejam boilerplate. Copy/paste this file with get_cases and handle_case
customised.
"""
from os import sys
class BaseInteractiveCaseHandler():
"""
Boilerplate class.
"""
def __init__(self):
self.source = self.get_source()
def get_source(self):
try:
while True:
yield sys.stdin.readline()
except EOFError:
pass
def read(self):
return next(self.source).strip()
def write(self, txt):
print(str(txt))
sys.stdout.flush()
def run(self):
cases = int(next(self.source))
for i in range(1, cases + 1):
self.handle_case(i)
def debug(self, *txt):
# Uncomment for debugging.
return
print(*[str(t) for t in txt], file=sys.stderr)
def handle_case(self, i):
raise NotImplementedError
class CaseHandler(BaseInteractiveCaseHandler):
"""
https://codingcompetitions.withgoogle.com/codejam/round/0000000000051635/0000000000104e05
Practice, ~35m, 1 incorrect
"""
def handle_case(self, i):
n = int(self.read())
words = [self.read() for _ in range(n)]
soln = self.solve(words)
self.write('Case #{}: {}'.format(i, soln))
def solve(self, raw_words):
words = sorted([w[::-1] for w in raw_words])
# In python, '' is sorted before 'A'.
self.debug(words)
for accent_l in range(max(len(w) for w in words) - 1, 0, -1):
self.debug(accent_l)
i = len(words) - 2
while i >= 0:
self.debug('i', i)
self.debug(words)
self.debug(' ', i, words[i][:accent_l], words[i+1][:accent_l])
if words[i][:accent_l] == words[i+1][:accent_l]:
stem = words[i][:accent_l]
x = words.pop(i)
y = words.pop(i)
self.debug('removed ', x, y)
i -= 1
while i >= 0 and words[i][:accent_l] == stem:
i -= 1
i -= 1
return len(raw_words) - len(words)
CaseHandler().run()
| [
"dprgarner@gmail.com"
] | dprgarner@gmail.com |
13f19f5bcf551f3f4499f60bbf7cd5325f1a18a6 | fd21d6384ba36aa83d0c9f05f889bdbf8912551a | /a10sdk/core/gslb/gslb_zone_service_dns_a_record_dns_a_record_srv.py | e6e43c2ee8a7be28521783527603b200e3b349b5 | [
"Apache-2.0"
] | permissive | 0xtobit/a10sdk-python | 32a364684d98c1d56538aaa4ccb0e3a5a87ecd00 | 1ea4886eea3a1609b2ac1f81e7326758d3124dba | refs/heads/master | 2021-01-18T03:08:58.576707 | 2014-12-10T00:31:52 | 2014-12-10T00:31:52 | 34,410,031 | 0 | 0 | null | 2015-04-22T19:05:12 | 2015-04-22T19:05:12 | null | UTF-8 | Python | false | false | 2,598 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class DnsARecordSrv(A10BaseClass):
"""Class Description::
Specify DNS Address Record.
Class dns-a-record-srv supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param as_replace: {"default": 0, "optional": true, "type": "number", "description": "Return this Service-IP when enable ip-replace", "format": "flag"}
:param as_backup: {"default": 0, "optional": true, "type": "number", "description": "As backup when fail", "format": "flag"}
:param weight: {"description": "Specify weight for Service-IP (Weight value)", "format": "number", "type": "number", "maximum": 100, "minimum": 1, "optional": true}
:param svrname: {"description": "Specify name", "format": "string", "minLength": 1, "optional": false, "maxLength": 63, "type": "string", "$ref": "/axapi/v3/gslb/service-ip"}
:param disable: {"default": 0, "optional": true, "type": "number", "description": "Disable this Service-IP", "format": "flag"}
:param static: {"default": 0, "optional": true, "type": "number", "description": "Return this Service-IP in DNS server mode", "format": "flag"}
:param ttl: {"optional": true, "type": "number", "description": "Specify TTL for Service-IP", "format": "number"}
:param admin_ip: {"description": "Specify admin priority of Service-IP (Specify the priority)", "format": "number", "type": "number", "maximum": 255, "minimum": 1, "optional": true}
:param no_resp: {"default": 0, "optional": true, "type": "number", "description": "Don't use this Service-IP as DNS response", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/gslb/zone/{name}/service/{service_port}+{service_name}/dns-a-record/dns-a-record-srv/{svrname}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "svrname"]
self.b_key = "dns-a-record-srv"
self.a10_url="/axapi/v3/gslb/zone/{name}/service/{service_port}+{service_name}/dns-a-record/dns-a-record-srv/{svrname}"
self.DeviceProxy = ""
self.as_replace = ""
self.as_backup = ""
self.weight = ""
self.svrname = ""
self.disable = ""
self.static = ""
self.ttl = ""
self.admin_ip = ""
self.no_resp = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| [
"doug@parksidesoftware.com"
] | doug@parksidesoftware.com |
b9ea48c03deb0ec979884538322f7b48b21f5023 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D98B/SUPMAND98BUN.py | ba37762c3029a0327ed8a98c72ab27c2349e27b6 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,855 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD98BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'RFF', MIN: 1, MAX: 6},
{ID: 'CUX', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 5},
{ID: 'NAD', MIN: 0, MAX: 6, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 1},
]},
]},
{ID: 'UNS', MIN: 1, MAX: 1},
{ID: 'NAD', MIN: 1, MAX: 999999, LEVEL: [
{ID: 'DTM', MIN: 1, MAX: 15},
{ID: 'ATT', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'REL', MIN: 0, MAX: 99, LEVEL: [
{ID: 'NAD', MIN: 1, MAX: 1},
{ID: 'PCD', MIN: 0, MAX: 1},
]},
{ID: 'EMP', MIN: 0, MAX: 9, LEVEL: [
{ID: 'PCD', MIN: 0, MAX: 1},
{ID: 'CUX', MIN: 0, MAX: 1},
{ID: 'NAD', MIN: 0, MAX: 9},
{ID: 'MOA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'PAT', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 1},
]},
]},
{ID: 'GIS', MIN: 1, MAX: 20, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'MEM', MIN: 0, MAX: 9, LEVEL: [
{ID: 'ATT', MIN: 0, MAX: 9, LEVEL: [
{ID: 'PCD', MIN: 0, MAX: 1},
]},
{ID: 'COT', MIN: 0, MAX: 99, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'PCD', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 3},
{ID: 'PAT', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 3},
]},
]},
]},
{ID: 'CNT', MIN: 0, MAX: 9},
{ID: 'AUT', MIN: 0, MAX: 1},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
d6be93f57e6800ddb1758330fd39c0ad37c84fdd | 809e8079051ae2a062c4b867654d6fb7b5db722d | /test/export.py | 451c615fd8392dc1982344ca4d0dd46091790b50 | [] | no_license | OpenSourceBrain/TobinEtAl2017 | 035fa4fc490e01c98a22dcb79e707152af5b1288 | 5afd481cdf92197e1438ec483955dae20293dc63 | refs/heads/master | 2020-04-11T23:18:05.057605 | 2019-05-10T13:15:30 | 2019-05-10T13:15:30 | 162,162,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | from pyneuroml.swc.ExportSWC import convert_to_swc
files = ['bask.cell.nml', 'pyr_4_sym.cell.nml']
for f in files:
convert_to_swc(f, add_comments=True) | [
"p.gleeson@gmail.com"
] | p.gleeson@gmail.com |
a07f19ef5b2f19ab5ca36e897948080c68a05850 | ec7591c3f478c43e76257aaa500d8f6a2e763d74 | /stanza/models/constituency/evaluate_treebanks.py | 11f3084b3413a8f82eef0949f0a8023a1ec187dd | [
"Apache-2.0"
] | permissive | stanfordnlp/stanza | 5cc3dbe70a96dd565639b7dae1efde6b4fa76985 | c530c9af647d521262b56b717bcc38b0cfc5f1b8 | refs/heads/main | 2023-09-01T12:01:38.980322 | 2023-03-14T16:10:05 | 2023-03-14T16:10:05 | 104,854,615 | 4,281 | 599 | NOASSERTION | 2023-09-10T00:31:36 | 2017-09-26T08:00:56 | Python | UTF-8 | Python | false | false | 1,249 | py | """
Read multiple treebanks, score the results.
Reports the k-best score if multiple predicted treebanks are given.
"""
import argparse
from stanza.models.constituency import tree_reader
from stanza.server.parser_eval import EvaluateParser, ParseResult
def main():
parser = argparse.ArgumentParser(description='Get scores for one or more treebanks against the gold')
parser.add_argument('gold', type=str, help='Which file to load as the gold trees')
parser.add_argument('pred', type=str, nargs='+', help='Which file(s) are the predictions. If more than one is given, the evaluation will be "k-best" with the first prediction treated as the canonical')
args = parser.parse_args()
print("Loading gold treebank: " + args.gold)
gold = tree_reader.read_treebank(args.gold)
print("Loading predicted treebanks: " + args.pred)
pred = [tree_reader.read_treebank(x) for x in args.pred]
full_results = [ParseResult(parses[0], [*parses[1:]])
for parses in zip(gold, *pred)]
if len(pred) <= 1:
kbest = None
else:
kbest = len(pred)
with EvaluateParser(kbest=kbest) as evaluator:
response = evaluator.process(full_results)
if __name__ == '__main__':
main()
| [
"horatio@gmail.com"
] | horatio@gmail.com |
c9b5d98cfab617283e43057e2f975465d57c1fdb | c8fb08292d264780c8cd3ac6734dadf2b15d9818 | /doc/_gallery/plot_2_6_monocomp_nonstat_colored_gaussian_noise.py | c4ab25ebb03cf1fd16656ad864eb6eede8c230f5 | [] | no_license | dhuadaar/pytftb | d2e761ae86053b4a78f494ee3272ca3f4cde05ad | d3f5e99775cb7bc3455440ac19bd80806c47b33f | refs/heads/master | 2021-01-17T22:03:29.804566 | 2016-01-18T06:42:06 | 2016-01-18T06:42:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the MIT license.
"""
=========================
Noisy Monocomponent Chirp
=========================
This example demonstrates the construction of a monocomponent signal with
linear frequency modulation and colored Gaussian noise.
"""
from tftb.generators import fmlin, amgauss, noisecg, sigmerge
from numpy import real
import matplotlib.pyplot as plt
fm, _ = fmlin(256)
am = amgauss(256)
signal = fm * am
noise = noisecg(256, .8)
sign = sigmerge(signal, noise, -10)
plt.plot(real(sign))
plt.xlabel('Time')
plt.ylabel('Real part')
plt.title('Gaussian transient signal embedded in -10 dB colored Gaussian noise')
plt.xlim(0, 256)
plt.grid()
plt.show()
| [
"deshpande.jaidev@gmail.com"
] | deshpande.jaidev@gmail.com |
5017780ee6b8321374fe9235fe905cae865ce388 | de01cb554c2292b0fbb79b4d5413a2f6414ea472 | /algorithms/Medium/684.redundant-connection.py | 94b04ed4eec6fe7450dfa43b9c4bf80eac217280 | [] | no_license | h4hany/yeet-the-leet | 98292017eadd3dde98a079aafcd7648aa98701b4 | 563d779467ef5a7cc85cbe954eeaf3c1f5463313 | refs/heads/master | 2022-12-10T08:35:39.830260 | 2020-09-02T23:12:15 | 2020-09-02T23:12:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,934 | py | #
# @lc app=leetcode id=684 lang=python3
#
# [684] Redundant Connection
#
# https://leetcode.com/problems/redundant-connection/description/
#
# algorithms
# Medium (57.63%)
# Total Accepted: 100.8K
# Total Submissions: 174.9K
# Testcase Example: '[[1,2],[1,3],[2,3]]'
#
#
# In this problem, a tree is an undirected graph that is connected and has no
# cycles.
#
# The given input is a graph that started as a tree with N nodes (with distinct
# values 1, 2, ..., N), with one additional edge added. The added edge has two
# different vertices chosen from 1 to N, and was not an edge that already
# existed.
#
# The resulting graph is given as a 2D-array of edges. Each element of edges
# is a pair [u, v] with u < v, that represents an undirected edge connecting
# nodes u and v.
#
# Return an edge that can be removed so that the resulting graph is a tree of N
# nodes. If there are multiple answers, return the answer that occurs last in
# the given 2D-array. The answer edge [u, v] should be in the same format,
# with u < v.
# Example 1:
#
# Input: [[1,2], [1,3], [2,3]]
# Output: [2,3]
# Explanation: The given undirected graph will be like this:
# 1
# / \
# 2 - 3
#
#
# Example 2:
#
# Input: [[1,2], [2,3], [3,4], [1,4], [1,5]]
# Output: [1,4]
# Explanation: The given undirected graph will be like this:
# 5 - 1 - 2
# | |
# 4 - 3
#
#
# Note:
# The size of the input 2D-array will be between 3 and 1000.
# Every integer represented in the 2D-array will be between 1 and N, where N is
# the size of the input array.
#
#
#
#
#
# Update (2017-09-26):
# We have overhauled the problem description + test cases and specified clearly
# the graph is an undirected graph. For the directed graph follow up please see
# Redundant Connection II). We apologize for any inconvenience caused.
#
#
class Solution:
def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:
| [
"kevin.wkmiao@gmail.com"
] | kevin.wkmiao@gmail.com |
3a0f2cb630520d7d754dab75c74fc1a264fc997f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03378/s701099054.py | 973d6f5da00380a9f78771557ce253738abc986c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | n, m, x = map(int, input().split())
A = list(map(int, input().split()))
start = 0
end = 0
for i in range(x):
if i in A:
start += 1
for i in range(x,n+1):
if i in A:
end += 1
print(min(start, end)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e8dbd929840258d578174ce40685fd6ebdaa89b1 | f3dbb5b2bdbb4b45bb6548935e7873736c7adc68 | /python/dvalley_core_TestEngineer/.svn/pristine/e8/e8dbd929840258d578174ce40685fd6ebdaa89b1.svn-base | 84058cade8f93e499ed8d350d3df254da5415353 | [] | no_license | lw000/new_python_demo | ece955bdb7df2c5499e4fb582503c13081c89649 | 3ca55b2c3106db75e31676e45e6c5c1f5a6fd714 | refs/heads/master | 2020-03-26T21:09:33.082625 | 2018-08-20T05:37:54 | 2018-08-20T05:37:54 | 145,370,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,954 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
'''
Created on 2018年05月31日
@author: Administrator
'''
import sys
import demjson
import logging
import dvcore
from base_api import BaseApi
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
class UserApi(BaseApi):
def __init__(self, debug = 0):
super(UserApi, self).__init__(debug=debug)
def start(self):
self.__post_user_resetPassword(method='/user/resetPassword')
self.__post_user_info(method='/user/info')
self.__post_user_baseinfo(method='/user/info')
self.__post_user_nearUser(method='/user/nearUser')
def stop(self):
pass
def __post_user_login(self, method):
params = {'phone':'13632767233', 'pwd':dvcore.webRsaEncrypt('lwstarr23133')}
r = self._send_post(method=method, token=None, params=params)
if(r.status_code == 200):
result = demjson.decode(r.text)
if result['result'] == 1:
self.login_result_data = result['data']
print('result:' + str(result['result']))
print('data:' + str(result['data']))
return True
else:
print(str(result))
else:
log.debug(r)
return False
def __post_user_resetPassword(self, method):
params = dict(uid=self.login_result_data['uid'], old_pwd=dvcore.webRsaEncrypt('lwstarr23133'), pwd=dvcore.webRsaEncrypt('lwstarr23133'))
r = self._send_post(method=method, token=self.login_result_data['token_info']['token'], params=params)
if(r.status_code == 200):
result = demjson.decode(r.text)
if result['result'] == 1:
print('result:' + str(result['result']))
print('data:' + str(result['data']))
else:
print(str(result))
else:
log.debug(r)
#获取用户信息
def __post_user_info(self, method):
params = dict(uid=self.login_result_data['uid'], to_uid=self.login_result_data['uid'])
r = self._send_post(method=method, token=self.login_result_data['token_info']['token'], params=params)
if(r.status_code == 200):
result = demjson.decode(r.text)
if result['result'] == 1:
print('result:' + str(result['result']))
print('data:' + str(result['data']))
else:
print(str(result))
else:
log.debug(r)
#获取用户基本信息
def __post_user_baseinfo(self, method):
params = dict(uid=self.login_result_data['uid'], to_uid=self.login_result_data['uid'])
r = self._send_post(method=method, token=self.login_result_data['token_info']['token'], params=params)
if(r.status_code == 200):
result = demjson.decode(r.text)
if result['result'] == 1:
print('result:' + str(result['result']))
print('data:' + str(result['data']))
else:
print(str(result))
else:
log.debug(r)
#附近的用户
def __post_user_nearUser(self, method):
params = dict(uid=self.login_result_data['uid'], lng='', lat='', sex=2, page=1, limit=20, distance=2000)
r = self._send_post(method=method, token=self.login_result_data['token_info']['token'], params=params)
if(r.status_code == 200):
result = demjson.decode(r.text)
if result['result'] == 1:
print('result:' + str(result['result']))
print('data:' + str(result['data']))
else:
print(str(result))
else:
log.debug(r) | [
"liwei@liweideiMac.local"
] | liwei@liweideiMac.local | |
7a99d10927155a58b60057bd7c103dbb1d282c98 | 486a7eda8f492b0607b262d7128c78427b180269 | /src/profileapp/models.py | ddb452673a3cbd06eebbd2af1cc4548f9c766c9f | [] | no_license | Komilcoder/orm | f6d09206d0e9a9b911657cf4c4c3405696d3a39d | 432d7121088aaf53a97a3c12dcf371494262c38a | refs/heads/master | 2023-03-16T06:25:11.561958 | 2021-03-15T07:07:44 | 2021-03-15T07:07:44 | 347,868,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | from django.db import models
from django.contrib.auth.models import User
class Client(models.Model):
client = models.ForeignKey(User,on_delete=models.CASCADE)
name = models.CharField(max_length=256)
def __str__(self):
return self.name
class Subject(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE,null=True)
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Products(models.Model):
subject = models.ForeignKey(Subject,on_delete=models.CASCADE, null=True)
name = models.CharField(max_length=255)
cost = models.IntegerField(default=0)
def __str__(self):
return self.name
class Order(models.Model):
product = models.ForeignKey(Products,on_delete=models.CASCADE)
amount = models.PositiveIntegerField(default=0)
total_price = models.IntegerField()
client = models.ForeignKey(Client,on_delete=models.CASCADE, blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.product)
| [
"yaxshilikovkomil@gmail.com"
] | yaxshilikovkomil@gmail.com |
3e02c0db5013c15ac08acaf6e6509dea8a7ff612 | 20f89f49400feb9d2885dc2daf3ea3ca189556e7 | /day05/proctice/三元表达式.py | 5e0bb8bef2856ba4cf186a6e9284acb76a7fb6c2 | [] | no_license | xiaobaiskill/python | 201511b1b1bddec8c33c4efa7ca2cc4afed24a89 | 540693baad757369ff811fb622a949c99fb6b4ba | refs/heads/master | 2021-04-12T03:43:30.308110 | 2018-07-13T01:41:19 | 2018-07-13T01:41:19 | 125,884,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
# Author Jmz
'''
语法:
[成立1 if condition1 else
成立2 if condition2 else ...
if 成立N conditionN else 不成立]
'''
sex = 'man'
print('正确' if sex == 'man' else '错误')
# 正确
'''
语句解析:
sex = 'man'
if sex == 'man':
print('正确')
else:
print('错误')
'''
age = 23
res = '猜大了' if age > 23 else '猜小了' if age < 23 else '猜对了'
print(res)
# '猜对了'
'''
语句解析:
age = 23
if age >23:
res = '猜大了'
elif age <23:
res = '猜小了'
else:
res = '猜对了'
''' | [
"1125378902@qq.com"
] | 1125378902@qq.com |
c29200633e547e1e14f2c3a49bc8b32333405810 | 86d499787fb35024db798b0c1dbfa7a6936854e9 | /py_tools/example/http_demo/02-根据用户需要返回相应的页面.py | cef6b306a0e0db13d4dc1c3b733418c609ce81ea | [] | no_license | Tomtao626/python-note | afd1c82b74e2d3a488b65742547f75b49a11616e | e498e1e7398ff66a757e161a8b8c32c34c38e561 | refs/heads/main | 2023-04-28T08:47:54.525440 | 2023-04-21T17:27:25 | 2023-04-21T17:27:25 | 552,830,730 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | #coding:utf-8
import socket
import re
def service_client(new_socket):
'''为这个客户端返回数据'''
# 接受浏览器发送过来的请求,http请求
request = new_socket.recv(1024)
# print(request)
request_lines = request.splitlines()
print("")
print(">"*20)
print(request_lines)
'''
GET /index.html HTTP/1.1
'''
file_name = ""
ret = re.match(r"[^/]+(/[^]*)",request_lines[0])
if ret:
file_name = ret.group(1)
if file_name == "/":
file_name = "/index.html"
try:
f = open("./html"+file_name, "rb")
except:
response = "HTTP/1.1 404 NOT FOUND\r\n"
response += "\r\n"
response += "------file not found------"
new_socket.send(response.encode("utf-8"))
else:
html_content = f.read()
f.close()
# 返回http格式的数据f给浏览器
# 准备要发送给浏览器的数据---header
response = "HTTP/1.1 200 OK"
response += "\r\n"
# 准备发送给浏览器的数据---body
# response += "hhhhhhhh"
new_socket.send(response.encode("utf-8"))
new_socket.send(html_content)
new_socket.close()
def main():
#创建套接字
tcp_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
#绑定
tcp_server.bind(("",8001))
#变为监听套接字
tcp_server.listen(128)
while True:
#等待新客户端的连接
new_socket, client_addr = tcp_server.accept()
#为这个客户端服务
service_client(new_socket)
# 关闭监听套接字
tcp_server.close()
if __name__ == "__main__":
main()
| [
"gogs@fake.local"
] | gogs@fake.local |
4572a666a39fd92088aeb374ea763180721e82f5 | 16e26dfa651770b51a7879eab5e2ab8d3d6cff92 | /src/data/make_dataset.py | b448390f2975f564afbb864c4610b03a179c80fa | [
"MIT"
] | permissive | carlosgalvez-tiendeo/ue_master-TFM | 108879efc56b01ed64ddbc0346359efcaf66a5e8 | 97ac4d3029b1eef8e95ea8cca846812cafa87d0a | refs/heads/main | 2023-06-09T11:20:59.913197 | 2021-06-23T18:14:00 | 2021-06-23T18:14:00 | 379,906,271 | 0 | 0 | MIT | 2021-06-24T11:45:31 | 2021-06-24T11:45:31 | null | UTF-8 | Python | false | false | 1,476 | py | # -*- coding: utf-8 -*-
import click
import logging
import os
import pandas as pd
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from sklearn.model_selection import train_test_split
PROJECT_DIR = Path(__file__).resolve().parents[2]
RANDOM_STATE = 288
def get_dataset():
df = pd.read_csv('src/data/ds_job.csv')
df.set_index('empleado_id', inplace=True)
X, y = df.drop('target', axis=1), df['target']
return train_test_split(X, y, test_size=0.2, random_state=RANDOM_STATE)
@click.command()
@click.argument('output_filepath', type=click.Path())
def main(output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
if not os.path.exists(output_filepath):
click.ClickException('Path doesn\'t exists').show()
return
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data...')
get_dataset(output_filepath)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
| [
"carlos.galvez@tiendeo.com"
] | carlos.galvez@tiendeo.com |
decd3605c3e8f3451b507837fc9e80d6e6f84b18 | b659e99f89cf17ae886857383cb5b708847fe3f1 | /Think Python Book/caseStudy-wordPlay/exercise9.3.py | 9014f3fb5e4d0edbc08f63fb7211540e2e2a2b34 | [] | no_license | nitheeshmavila/practice-python | bea06cc4b2b9247b926e07fd5a3987552e531242 | f54bf8934a4cf160cdfc9dc43176f1eea3bc7a41 | refs/heads/master | 2021-07-03T17:24:29.450939 | 2021-06-16T08:40:48 | 2021-06-16T08:40:48 | 100,113,256 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | '''
Exercise 9.2
--------------
Write a function named avoids that takes a word and a string of forbidden letters, and
that returns True if the word doesn’t use any of the forbidden letters.
Modify your program to prompt the user to enter a string of forbidden letters and then
print the number of words that don’t contain any of them. Can you find a combination
of 5 forbidden letters that excludes the smallest number of words?'''
def avoids(forbidden, word):
    """Return True if `word` contains none of the letters in `forbidden`.

    :param forbidden: string of letters that must not appear in the word
    :param word: candidate word to check
    :return: True when the two strings share no characters, else False
    """
    # any(...) short-circuits on the first forbidden character, exactly like
    # the explicit loop-and-return version it replaces.
    return not any(ch in forbidden for ch in word)
# Count dictionary words that avoid every user-supplied forbidden letter.
count = 0
# `with` guarantees the file handle is closed (the original leaked it).
with open('words.txt', 'r') as words:
    forbidden = input('enter the forbidden string\n')
    for word in words:
        # Arguments passed in declared order (forbidden, word); the trailing
        # newline on `word` is harmless because input() never returns '\n'.
        if avoids(forbidden, word):
            count += 1
print("%d words that don’t contain any of letters in %s"%(count,forbidden))
| [
"mail2nitheeshmavila@gmail.com"
] | mail2nitheeshmavila@gmail.com |
d84c3a1d543303d754b7448bdcaed968e11c7e93 | 12971fc2b1426f3d3a52039f21c4c2d7bb820f68 | /Exercises4Programmers_57Challenges/01_saying_hello/python/hello_challenge_01.py | dd10b80c3287cb06046aadc6a60c2a3755dfa048 | [
"MIT"
] | permissive | adrianogil/AlgoExercises | 29b3c64e071008bffbfe9273130f980100381deb | be1d8d22eedade2e313458e8d89185452d9da194 | refs/heads/main | 2023-08-18T23:31:51.463767 | 2023-07-22T18:02:46 | 2023-07-22T18:02:46 | 86,254,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # The “Hello, World” program is the first program you learn
# to write in many languages, but it doesn’t involve any input.
# So create a program that prompts for your name and prints a greeting using your name.
# Example Output
# What is your name? Brian
# Hello, Brian, nice to meet you!
# Challenge:
# - Write a new version of the program without using any variables.
print(f"Hello, {input('What is your name? ')}, nice to meet you!")
| [
"adrianogil.san@gmail.com"
] | adrianogil.san@gmail.com |
f044703c469fe0463f500e944f0e552a812c0673 | 1d7618d3f1a9ffcabb18caa65d09ea48b632b80b | /tests/opt/projectors/test_proj_lpballs.py | 156fb71b1de5c9cc8e4a0c07b95fb25edf5259ab | [
"Apache-2.0"
] | permissive | carnotresearch/cr-sparse | 668b607f0edd55644db6a879ec6b24a3171b264e | c43496bc54ec32b80b5a64901b04d0a12ac6687f | refs/heads/main | 2023-05-23T19:13:06.283727 | 2022-10-08T06:37:37 | 2022-10-08T06:37:37 | 323,566,858 | 70 | 9 | null | null | null | null | UTF-8 | Python | false | false | 2,337 | py | from projectors_setup import *
# L2 Balls
@pytest.mark.parametrize("x,q,outside", [
[[3,4], 5, 0],
[[3,4], 4, 1],
])
def test_l2_ball(x, q, outside):
ball = projectors.proj_l2_ball(q)
v = ball(x)
if outside:
# second projection should not change it
assert_array_equal(ball(v), v)
else:
# projection should not change it
assert_array_equal(v, x)
@pytest.mark.parametrize("x,q,b,outside", [
    [[3, 4], 5, 0, 0],
    [[3, 4], 4, 0, 1],
    [[3, 4], 5, [0, 0], 0],
    [[3, 4], 4, [0, 0], 1],
    [[3, 4], 5, [1, 1], 0],
    [[4, 5], 4, [1, 1], 1],
])
def test_l2_ball_b(x, q, b, outside):
    """Projection onto an l2 ball of radius q centered at b (scalar or
    vector center): interior points are unchanged, exterior points project
    idempotently."""
    project = projectors.proj_l2_ball(q, b=b)
    projected = project(x)
    if not outside:
        # An interior point must come back unchanged.
        assert_array_equal(projected, x)
    else:
        # Projecting an already-projected point must be a no-op.
        assert_array_equal(project(projected), projected)
# @pytest.mark.parametrize("x,q,b,outside", [
# [[3,4], 5, 0, 0],
# [[3,4], 4, 0, 1],
# [[3,4], 5, [0,0], 0],
# [[3,4], 4, [0,0], 1],
# [[3,4], 5, [1,1], 0],
# [[4,5], 4, [1,1], 1],
# ])
# def test_l2_ball_b_a(x, q, b, outside):
# n = len(x)
# A = jnp.eye(n)
# ball = projectors.proj_l2_ball(q, b=b, A=A)
# v = ball(x)
# if outside:
# # second projection should not change it
# assert_array_equal(ball(v), outside)
# else:
# # projection should not change it
# assert_array_equal(v, x)
# L1 Balls
@pytest.mark.parametrize("x,q,outside", [
    [[3, 4], 7, 0],
    [[3, 4], 4, 1],
])
def test_l1_ball(x, q, outside):
    """Projection onto the l1 ball of radius q: interior points are fixed
    points; exterior points project idempotently onto the ball."""
    project = projectors.proj_l1_ball(q)
    projected = project(x)
    if not outside:
        # An interior point must come back unchanged.
        assert_array_equal(projected, x)
    else:
        # Projecting an already-projected point must be a no-op.
        assert_array_equal(project(projected), projected)
@pytest.mark.parametrize("x,q,b,outside", [
    [[3, 4], 7, 0, 0],
    [[3, 4], 4, 0, 1],
    [[3, 4], 7, [0, 0], 0],
    [[3, 4], 4, [0, 0], 1],
    [[3, 4], 5, [1, 1], 0],
    [[4, 5], 4, [1, 1], 1],
])
def test_l1_ball_b(x, q, b, outside):
    """Projection onto an l1 ball of radius q centered at b (scalar or
    vector center): interior points are unchanged, exterior points project
    idempotently."""
    project = projectors.proj_l1_ball(q, b=b)
    projected = project(x)
    if not outside:
        # An interior point must come back unchanged.
        assert_array_equal(projected, x)
    else:
        # Projecting an already-projected point must be a no-op.
        assert_array_equal(project(projected), projected)
| [
"shailesh@indigits.com"
] | shailesh@indigits.com |
6ffec8c541799771f50bd871e898069f9467c6d2 | 14aa22b09485b40bd1983b6b00f6b8d2f743e0c9 | /120.Triangle/main.py | fea13da0a7ed334583985b15b216c63755b3ffc8 | [] | no_license | ZhihaoJun/leetcode | 02030539815b6353b6a5588c64eebb4a10882b9d | 3cbb3f8ec81aa688ca9d90903a49cdf4a6130c4c | refs/heads/master | 2021-10-25T22:55:45.336738 | 2019-04-08T04:21:41 | 2019-04-08T04:21:41 | 33,469,539 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | class Solution(object):
def get(self, x, y):
return self.f[y][x]
def set(self, x, y, v):
if y not in self.f:
self.f[y] = {}
self.f[y][x] = v
def value(self, x, y):
return self.triangle[y][x]
def cal(self, x, y):
if y == self.size-1:
self.set(x, y, self.value(x, y))
else:
m = min(self.get(x, y+1), self.get(x+1, y+1))
self.set(x, y, m+self.value(x, y))
return self.get(x, y)
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
self.size = len(triangle)
self.triangle = triangle
self.f = {}
for y in xrange(self.size-1, -1, -1):
for x in xrange(y+1):
self.cal(x, y)
return self.cal(0, 0)
def main():
    """Ad-hoc driver: print the minimum path sum for sample triangles.

    The original shadowed its first sample with `triangle = [[0]]`, so the
    4-row case was dead code; both cases are exercised here.
    """
    cases = [
        [[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]],  # expected minimum: 11
        [[0]],                                   # single-cell triangle: 0
    ]
    for triangle in cases:
        # print(...) with one argument behaves the same on Python 2 and 3,
        # unlike the original `print expr` statement (Python-2-only syntax).
        print(Solution().minimumTotal(triangle))
# Run the demo driver only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"zhao11fs@gmail.com"
] | zhao11fs@gmail.com |
56577c16868cd793762ec33a8905362318f541f3 | b3f063ba06a7b4786463cc77756dde766da2b687 | /setup/load_data/load_csv.py | 1f79989374914f1f7d6295fee1dbad5e1dd66a9b | [] | no_license | Lothilius/fun-with-containers | 594b6e9c0470021fc659eda8ef182bb8a0960cc6 | fbc433323f49b13ab13b06aacda0aa57ccbd84b0 | refs/heads/master | 2022-12-22T06:42:34.069508 | 2020-10-06T09:28:30 | 2020-10-06T09:28:30 | 300,806,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,521 | py | import pandas as pd
from os import environ
import logging
from sqlalchemy import create_engine
from sqlalchemy.exc import OperationalError
import traceback
import time
# Configure root logging once at import time (DEBUG level) and create the
# module-level logger used throughout this script.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s:%(message)s')
logger = logging.getLogger(__name__)
def connect_to_db():
    """Open a SQLAlchemy connection to the postgres database described by the
    POSTGRES_USER / POSTGRES_PASSWORD / POSTGRES_HOST environment variables.

    :return: an open SQLAlchemy Connection; the caller is responsible for
        closing it.
    """
    from urllib.parse import quote_plus  # local import: only needed here

    # Percent-encode the credentials so characters such as '@', '/' or ':'
    # in a password cannot corrupt the DSN.
    url = 'postgresql+psycopg2://%s:%s@%s/postgres' % (quote_plus(environ['POSTGRES_USER']),
                                                       quote_plus(environ['POSTGRES_PASSWORD']),
                                                       environ['POSTGRES_HOST'])
    # pool_recycle guards against the server dropping idle connections.
    alchemy_engine = create_engine(url, pool_recycle=3600)
    return alchemy_engine.connect()
def load_csv(table_name="users"):
    """ Load a CSV file in to a predefined postgres DB by creating a table with the name provided. Postgres and csv
    are predefined in environment variable.
    :param table_name: string of the table name to be created
    :return: None
    """
    # Connect to postgres, retrying once in case the DB is still starting up.
    # (Replaces the original duplicated connect call, whose second failure
    # was unhandled.)
    max_attempts = 2
    postgres_connection = None
    for attempt in range(1, max_attempts + 1):
        try:
            postgres_connection = connect_to_db()
            break
        except OperationalError:
            if attempt == max_attempts:
                raise  # out of retries: surface the real error to the caller
            logger.warning("DB not available. Trying again in 5 seconds")
            time.sleep(5)

    try:
        # Load User file in to a Dataframe
        users_df = pd.read_csv(environ['CSV_FILE'])

        # Split the composite id column ("<user_id> <last_four>") into parts.
        users_df = pd.concat([users_df,
                              users_df['id'].apply(lambda x: pd.Series({'user_id': int(x.split(' ')[0]),
                                                                        'last_four': x.split(' ')[1]}))], axis=1)

        # Set User Id and last four as compound index
        users_df.set_index(['user_id', 'last_four'], inplace=True)

        # Normalize the visit date column into a proper datetime dtype.
        users_df['visit_date'] = pd.to_datetime(arg=users_df['visit_date'])

        logger.info("Attempting to create table %s", table_name)

        # Create (or replace) the table in the postgres DB.
        results = users_df[['first_name', 'last_name', 'age', 'gender', 'visit_date']].to_sql(table_name,
                                                                                              postgres_connection,
                                                                                              if_exists='replace')
        logger.info("Results: %s", results)
    finally:
        # Always release the connection, even if the load fails part-way.
        postgres_connection.close()
        logger.info("Connection to DB Closed")
if __name__ == '__main__':
    try:
        load_csv()
    # `except Exception` (not a bare `except:`) lets KeyboardInterrupt and
    # SystemExit propagate instead of being silently swallowed.
    except Exception:
        logger.error(traceback.format_exc())
"martin.valenzuela@bazaarvoice.com"
] | martin.valenzuela@bazaarvoice.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.