blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
68615c0335fc693397ae3712ddd889db8865a5ec
|
cee65c4806593554662330368c799c14ec943454
|
/src/dms-preview/azext_dms/vendored_sdks/datamigration/models/project_task.py
|
f06065a5bf044dac5eb01d8d79ccba2c49e5d1d2
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
azclibot/azure-cli-extensions
|
d5d1a4ecdfc87fd79f5ad042fb85cdbf881897d2
|
c230646258d4b56efb7d44eb7a0230f2943da6f6
|
refs/heads/master
| 2023-08-28T03:55:02.311902
| 2019-04-04T16:05:45
| 2019-04-04T16:05:45
| 179,548,695
| 1
| 1
|
MIT
| 2021-07-28T15:26:17
| 2019-04-04T17:54:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,660
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ProjectTask(Resource):
    """A task resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param etag: HTTP strong entity tag value. This is ignored if submitted.
    :type etag: str
    :param properties: Custom task properties
    :type properties: ~azure.mgmt.datamigration.models.ProjectTaskProperties
    """

    # Server-populated fields; client-supplied values are ignored/rejected.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Python attribute -> wire-format key/type mapping used by the SDK
    # (de)serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'ProjectTaskProperties'},
    }

    def __init__(self, **kwargs):
        super(ProjectTask, self).__init__(**kwargs)
        # HTTP strong ETag; ignored when submitted in a request.
        self.etag = kwargs.get('etag', None)
        # Custom task properties (ProjectTaskProperties instance).
        self.properties = kwargs.get('properties', None)
|
[
"wx44@cornell.edu"
] |
wx44@cornell.edu
|
308397ed048cf03a190ffa0c99b55d07196a45cf
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_97/591.py
|
65f897b1fd4fe4677b641f162d03e1a08dcae786
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,640
|
py
|
# Recycled Numbers (Google Code Jam 2012 Qualification, Problem C)


def count_recycled(low, high):
    """Count distinct recycled pairs (n, m) with low <= n < m <= high.

    m is "recycled" from n when it is a rotation of n's decimal digits
    with no leading zero.  Each unordered pair is counted once, so
    numbers like 2121 (which reaches 1212 via two different rotations)
    are de-duplicated with a set.

    :param low: inclusive lower bound (A)
    :param high: inclusive upper bound (B)
    :return: number of distinct recycled pairs in the range
    """
    total = 0
    for n in range(low, high + 1):
        digits = str(n)
        width = len(digits)
        if width == 1:
            # A single digit has no distinct rotations.
            continue
        partners = set()
        for split in range(1, width):
            rotated = digits[split:] + digits[:split]
            if rotated[0] == '0':
                # Rotations with a leading zero are not valid numbers.
                continue
            m = int(rotated)
            # Requiring m > n counts each unordered pair exactly once and
            # also excludes rotations equal to n itself; m >= low follows
            # from m > n >= low, so only the upper bound needs checking.
            if n < m <= high:
                partners.add(m)
        total += len(partners)
    return total


def main():
    """Read C-large.in and write one 'Case #k: count' line per test case."""
    with open('C-large.in', 'r') as fr, open('C-large.out', 'w') as fw:
        cases = int(fr.readline())
        for case in range(1, cases + 1):
            low, high = (int(tok) for tok in fr.readline().split())
            fw.write("Case #%d: %d\n" % (case, count_recycled(low, high)))


if __name__ == '__main__':
    main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
2136ceed7ded2995dc97b82ced276854c3146f10
|
6a044f45cd09695ea6f66f35bb8decf86a84607d
|
/installer/resources/pacbot_app/alb_https_listener.py
|
e285a11743e84dfb2706dd3e7435a718e88798c8
|
[
"Apache-2.0"
] |
permissive
|
ritesh74/pacbot
|
a07bdf82632342509f05b5c5dbb6eb6aaba40219
|
4b5361d99e7efbbc5603ec9c6568ba639105c773
|
refs/heads/master
| 2021-07-09T15:35:27.342903
| 2020-09-28T20:36:42
| 2020-09-28T20:36:42
| 199,405,428
| 1
| 0
|
Apache-2.0
| 2019-07-29T07:53:26
| 2019-07-29T07:53:25
| null |
UTF-8
|
Python
| false
| false
| 2,414
|
py
|
from core.terraform.resources.aws.load_balancer import ALBListenerResource, ALBListenerRuleResource
from core.config import Settings
from resources.pacbot_app.alb import ApplicationLoadBalancer
from resources.pacbot_app import alb_target_groups as tg
# Common path prefix for all API listener rules below.
PATH_PREFIX = '/api/'


class PacBotHttpsListener(ALBListenerResource):
    """HTTPS (port 443) listener on the PacBot application load balancer.

    Requests not matched by any rule fall through to the Nginx target group.
    """
    load_balancer_arn = ApplicationLoadBalancer.get_output_attr('arn')
    port = 443
    protocol = "HTTPS"
    # Predefined AWS TLS negotiation policy.
    ssl_policy = "ELBSecurityPolicy-2016-08"
    certificate_arn = Settings.get('SSL_CERTIFICATE_ARN')
    # Default action forwards to the Nginx target group.
    default_action_target_group_arn = tg.NginxALBTargetGroup.get_output_attr('arn')
    default_action_type = "forward"
class BaseLR:
    """Shared settings mixed into every HTTPS listener rule below."""
    listener_arn = PacBotHttpsListener.get_output_attr('arn')
    action_type = "forward"
    # Rules match on the request path.
    condition_field = "path-pattern"
class ConfigALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
    """Route /api/config* to the config service target group."""
    action_target_group_arn = tg.ConfigALBTargetGroup.get_output_attr('arn')
    condition_values = [PATH_PREFIX + "config*"]


class AdminALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
    """Route /api/admin* to the admin service target group."""
    action_target_group_arn = tg.AdminALBTargetGroup.get_output_attr('arn')
    condition_values = [PATH_PREFIX + "admin*"]


class ComplianceALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
    """Route /api/compliance* to the compliance service target group."""
    action_target_group_arn = tg.ComplianceALBTargetGroup.get_output_attr('arn')
    condition_values = [PATH_PREFIX + "compliance*"]


class NotificationsALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
    """Route /api/notifications* to the notifications service target group."""
    action_target_group_arn = tg.NotificationsALBTargetGroup.get_output_attr('arn')
    condition_values = [PATH_PREFIX + "notifications*"]


class StatisticsALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
    """Route /api/statistics* to the statistics service target group."""
    action_target_group_arn = tg.StatisticsALBTargetGroup.get_output_attr('arn')
    condition_values = [PATH_PREFIX + "statistics*"]


class AssetALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
    """Route /api/asset* to the asset service target group."""
    action_target_group_arn = tg.AssetALBTargetGroup.get_output_attr('arn')
    condition_values = [PATH_PREFIX + "asset*"]


class AuthALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
    """Route /api/auth* to the auth service target group."""
    action_target_group_arn = tg.AuthALBTargetGroup.get_output_attr('arn')
    condition_values = [PATH_PREFIX + "auth*"]


class VulnerabilityALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
    """Route /api/vulnerability* to the vulnerability service target group."""
    action_target_group_arn = tg.VulnerabilityALBTargetGroup.get_output_attr('arn')
    condition_values = [PATH_PREFIX + "vulnerability*"]
|
[
"sanjnur@gmail.com"
] |
sanjnur@gmail.com
|
8848a2025763c8c406ee3f306ba82e82e8db0a70
|
bf12e13c0ab5ccf2fc32509b02aaae6b6a2e3327
|
/examples/hello_rect.py
|
9f1bcbed6deb09a08632a4cee248572761d502d9
|
[
"MIT",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
HighCWu/tpythonpp
|
42b56c9eb3c77192cbda36f0e198707bb858fe38
|
f1c15e1101993e4c9c7529739823b47759ea13f7
|
refs/heads/master
| 2023-06-30T16:17:09.409107
| 2021-03-19T04:16:12
| 2021-03-19T04:16:12
| 391,806,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
def test():
    """Exercise the runtime's rect type: construction, attribute reads and
    writes, and area computation, printing each intermediate result.
    """
    print('hello world')
    # NOTE(review): `rect` is not imported anywhere in this file — presumably
    # a builtin provided by the embedding runtime (tpythonpp examples dir);
    # confirm before running under stock CPython.
    r = rect(0,0, 320, 240)
    print(r)
    print(r.x)
    print(r.y)
    print(r.width)
    print(r.height)
    # Attributes are writable; x takes an int, y accepts float arithmetic.
    r.x = 2
    print(r.x)
    r.y += 0.1
    print(r.y)
    area = r.get_area()
    print(area)


test()
|
[
"goatman.py@gmail.com"
] |
goatman.py@gmail.com
|
90f7ab7711d5790e74f9518e25d8c39a79edafd8
|
7b5828edda7751700ca7002b40a214e39e5f48a8
|
/EA/simulation/server_commands/service_npc_commands.py
|
07e4d57abbcbb8550b95b267c098277d3e6c293a
|
[] |
no_license
|
daniela-venuta/Sims-4-Python-Script-Workspace
|
54c33dac02f84daed66f46b7307f222fede0fa62
|
f408b28fb34626b2e3b2953152343d591a328d66
|
refs/heads/main
| 2023-03-29T18:08:39.202803
| 2021-03-30T19:00:42
| 2021-03-30T19:00:42
| 353,111,243
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,830
|
py
|
from date_and_time import create_time_span
from sims4.commands import CommandType
import services
import sims4.commands
@sims4.commands.Command('service_npc.request_service', command_type=CommandType.Cheat)
def request_service(service_npc_type:str, household_id=None, _connection=None):
    """Cheat command: request a service NPC for a household.

    When household_id is omitted or does not resolve, the requesting
    client's own household is used. Returns True on success, False when
    the service tuning or the client cannot be found.
    """
    tuning = services.service_npc_manager().get(service_npc_type)
    if tuning is None:
        return False
    client = services.client_manager().get(_connection)
    if client is None:
        return False
    household = None
    if household_id is not None:
        # An explicit id wins, but fall back to the client's household
        # if the lookup fails.
        household = services.household_manager().get(int(household_id))
    if household is None:
        household = client.household
    services.current_zone().service_npc_service.request_service(household, tuning)
    sims4.commands.output('Requesting service {0}'.format(service_npc_type), _connection)
    return True
@sims4.commands.Command('service_npc.fake_perform_service')
def fake_perform_service(service_npc_type:str, _connection=None):
    """Debug command: run the service NPC's fake_perform against the
    requesting client's household.

    Returns True on success, False when the service tuning or the
    client cannot be found.
    """
    tuning = services.service_npc_manager().get(service_npc_type)
    if tuning is None:
        return False
    client = services.client_manager().get(_connection)
    if client is None:
        return False
    tuning.fake_perform(client.household)
    return True
@sims4.commands.Command('service_npc.cancel_service', command_type=CommandType.Automation)
def cancel_service(service_npc_type:str, max_duration:int=240, _connection=None):
    """Automation command: cancel a service for the client's household.

    max_duration is accepted for signature compatibility but unused here.
    Returns True on success, False when the service tuning or the client
    cannot be found.
    """
    tuning = services.service_npc_manager().get(service_npc_type)
    if tuning is None:
        return False
    client = services.client_manager().get(_connection)
    if client is None:
        return False
    services.current_zone().service_npc_service.cancel_service(client.household, tuning)
    return True
@sims4.commands.Command('service_npc.toggle_auto_scheduled_services', command_type=CommandType.Automation)
def toggle_auto_scheduled_services(enable:bool=None, max_duration:int=240, _connection=None):
    """Automation command: enable/disable automatically scheduled services.

    With no explicit `enable` argument the current setting is flipped.
    max_duration is accepted for signature compatibility but unused here.
    """
    npc_service = services.current_zone().service_npc_service
    if enable is None:
        new_state = not npc_service._auto_scheduled_services_enabled
    else:
        new_state = enable
    npc_service._auto_scheduled_services_enabled = new_state
    return True
|
[
"44103490+daniela-venuta@users.noreply.github.com"
] |
44103490+daniela-venuta@users.noreply.github.com
|
7ba1d723327bdcf4aef6f5d70f72674ce22431c7
|
5774101105b47d78adb7a57eefdfa21502bbd70c
|
/python 语法基础/d14_tkinter_python图形开发界面库/tkinter/3.button控件.py
|
82895d1a97556d89642d20d2a44eb01bb5377143
|
[] |
no_license
|
zhlthunder/python-study
|
34d928f0ebbdcd5543ae0f41baaea955c92f5c56
|
0f25dd5105ba46791842d66babbe4c3a64819ee5
|
refs/heads/master
| 2023-01-12T18:39:47.184978
| 2018-10-07T23:48:04
| 2018-10-07T23:48:04
| 90,516,611
| 0
| 1
| null | 2022-12-26T19:46:22
| 2017-05-07T07:39:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 615
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: zhl
# Demo of tkinter Button widgets: a named callback, a lambda callback,
# and the window's quit method as a callback.
import tkinter


def func():
    """Callback for button1: print a fixed message."""
    print("zhl is good man")


win=tkinter.Tk()
win.title("zhl")
# "400x400+200+0": 400x400 window placed 200px from the left screen edge.
win.geometry("400x400+200+0")
# text: the label displayed on the button
# command: the function invoked when the button is clicked
# height, width: the button's dimensions
button1=tkinter.Button(win,text="按钮",command=func,width=5,height=5)
button1.pack()
# Same widget with an inline lambda callback.
button2=tkinter.Button(win,text="按钮",command=lambda:print("it is button2"),width=5,height=5)
button2.pack()
# win.quit exits the mainloop started below.
button3=tkinter.Button(win,text="退出",command=win.quit,width=5,height=5)
button3.pack()
win.mainloop()
|
[
"zhlthunder@163.com"
] |
zhlthunder@163.com
|
3537ab717502779be66add592bf5cff21cb46dca
|
322e3003cc14c9beb7aa47363ca3c2f6038b82d5
|
/lecture6/pyspark/basics.py
|
b7cd3c6d03c81beae10b26d0f9da81724997ec3c
|
[] |
no_license
|
danielvachalek/MLOps
|
039a393c71a418383ea46338e2d415e7c3936b56
|
0746e0380b73d93b2f12a22df04a74de7daf18a0
|
refs/heads/master
| 2023-02-09T01:21:53.874657
| 2021-01-02T22:49:32
| 2021-01-02T22:49:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,011
|
py
|
# Databricks notebook source
# MAGIC %md In Cmd 2, the AWS_ACCESS_KEY and AWS_SECRET_KEY variables are set and kept hidden.

# COMMAND ----------

# NOTE(review): placeholder credentials in source control — real keys must
# never be hard-coded; prefer Databricks secret scopes.
AWS_ACCESS_KEY = "AA"
AWS_SECRET_KEY = "BB"

# COMMAND ----------

# Configure Hadoop's s3n connector with the credentials above.
# (`sc`, `spark`, `display` and `sqlContext` are Databricks notebook globals.)
sc._jsc.hadoopConfiguration().set("fs.s3n.awsAccessKeyId", AWS_ACCESS_KEY)
sc._jsc.hadoopConfiguration().set("fs.s3n.awsSecretAccessKey", AWS_SECRET_KEY)

# COMMAND ----------

# Read the tab-separated ratings file u.data from S3 with inferred schema.
df = spark.read.csv("s3://databricks-recsys/u.data",header=True, sep="\t",inferSchema = True)
display(df)

# COMMAND ----------

# Write the ratings back to S3 in Parquet format.
s3path = "s3://databricks-recsys/"
df.write.parquet(s3path+"u.parquet")

# COMMAND ----------

# NOTE(review): .show() returns None, so df_parquet ends up None — it is
# only used here to display the Parquet round-trip.
df_parquet = spark.read.parquet(s3path+"u.parquet").show()

# COMMAND ----------

# Spark -> pandas conversion (collects the full dataset to the driver).
pdf = df.toPandas()

# COMMAND ----------

pdf.head()

# COMMAND ----------

# pandas -> Spark conversion.
sdf = sqlContext.createDataFrame(pdf)

# COMMAND ----------

sdf.describe()

# COMMAND ----------

sdf.printSchema()

# COMMAND ----------

# Koalas (pandas API on Spark) interop.
import databricks.koalas as ks
kdf = sdf.to_koalas()
kdf['iid'].to_numpy()[:3]

# COMMAND ----------

type(ks.from_pandas(pdf))

# COMMAND ----------

# Register a temp view so the data can be queried with SQL.
sdf.createOrReplaceTempView('sdf')

# COMMAND ----------

query = 'select distinct iid from sdf order by iid'
spark.sql(query).show()

# COMMAND ----------

# Read the pipe-separated raw movies file and register it as a view too.
movies_sdf = spark.read.csv("s3://databricks-recsys/movies_raw.dat",header=False, sep="|",inferSchema = True)
display(movies_sdf)

# COMMAND ----------

movies_sdf.createOrReplaceTempView('movies_sdf')

# COMMAND ----------

# Top-10 items by average rating, requiring at least 5 ratings each.
query = """
select sdf.iid, avg(sdf.rating) as avg_rating, count(sdf.rating) as num_rating, first(movies_sdf._c1) as movie
from sdf,movies_sdf
where sdf.iid = movies_sdf._c0
group by iid
having num_rating >= 5
order by avg_rating desc
limit 10
"""
top_movies_sdf = spark.sql(query)

# COMMAND ----------

top_movies_kdf = top_movies_sdf.to_koalas()
top_movies_kdf.head()

# COMMAND ----------

display(top_movies_sdf)

# COMMAND ----------

# Same per-item average via the DataFrame API instead of SQL.
sdf_grouped = sdf.groupBy("iid").agg({'rating':'avg'})
pdf_grouped = sdf_grouped.toPandas()
len(pdf_grouped)
|
[
"you@example.com"
] |
you@example.com
|
79e82a9736205ebba06486b564cb8925c6d74af9
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/torch/_utils.py
|
55f737a5974002ee337bfdaf7d920c2472a0fe84
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:03a04fffa1996df6bcab4d2bf79566d0e6b7d661fe5e37b292b5a766d648edfa
size 19786
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
e554ed1e3f0ef7ab8afac5e92e7db32e3179c2ce
|
05a090ee8f9d6dc6bbcc3d20cf8d4a7c8a627bde
|
/kash/migrations/0003_auto_20201203_1658.py
|
9da1a6f5d84271275e7fa8f9401985f96c9af90e
|
[] |
no_license
|
Komilcoder/kash_app
|
527e84c63b03264f72aba4e2d3039a219beae556
|
88ab937c3391b1104bbdbf733da49634ea645ecf
|
refs/heads/master
| 2023-01-21T00:46:24.482017
| 2020-12-03T15:52:01
| 2020-12-03T15:52:01
| 318,185,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,520
|
py
|
# Generated by Django 3.1.4 on 2020-12-03 11:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: replaces the plain description/title
    fields on News and the name field on Tag with _ru/_uz localized
    variants. Data in the removed columns is dropped, which is why the
    new columns allow null.
    """

    dependencies = [
        ('kash', '0002_auto_20201203_1648'),
    ]

    operations = [
        # Drop the unlocalized columns first...
        migrations.RemoveField(
            model_name='news',
            name='description',
        ),
        migrations.RemoveField(
            model_name='news',
            name='title',
        ),
        migrations.RemoveField(
            model_name='tag',
            name='name',
        ),
        # ...then add the Russian/Uzbek replacements.
        migrations.AddField(
            model_name='news',
            name='description_ru',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='news',
            name='description_uz',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='news',
            name='title_ru',
            field=models.CharField(max_length=250, null=True),
        ),
        migrations.AddField(
            model_name='news',
            name='title_uz',
            field=models.CharField(max_length=250, null=True),
        ),
        migrations.AddField(
            model_name='tag',
            name='name_ru',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='tag',
            name='name_uz',
            field=models.CharField(max_length=100, null=True),
        ),
    ]
|
[
"yaxshilikovkomil@gmail.com"
] |
yaxshilikovkomil@gmail.com
|
a4aa3cb66427702daeca11f1eba49736ef4dd8e8
|
e81576012330e6a6024d14f3e241f88ca34b73cd
|
/python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cps/models/Listener.py
|
f4508baf200e067c53fd0aa337482b86f24fb929
|
[
"MIT"
] |
permissive
|
Ureimu/weather-robot
|
eba6a84147755aa83c941a306bac1a7c4e95e23e
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
refs/heads/master
| 2021-01-15T07:23:42.274413
| 2020-03-23T02:30:19
| 2020-03-23T02:30:19
| 242,912,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,761
|
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class Listener(object):
    """Load-balancer listener model (auto-generated JDCloud SDK class).

    Every field is optional; fields left unset stay None.
    """

    def __init__(self, listenerId=None, loadBalancerId=None, protocol=None, port=None, algorithm=None, stickySession=None, realIp=None, status=None, name=None, description=None, healthCheck=None, healthCheckTimeout=None, healthCheckInterval=None, healthyThreshold=None, unhealthyThreshold=None, healthCheckIp=None, serverGroupId=None):
        """
        :param listenerId: (Optional) listener ID
        :param loadBalancerId: (Optional) load balancer ID
        :param protocol: (Optional) protocol
        :param port: (Optional) port
        :param algorithm: (Optional) scheduling algorithm
        :param stickySession: (Optional) sticky-session state, "on" or "off"
        :param realIp: (Optional) whether the real client IP is obtained
        :param status: (Optional) status
        :param name: (Optional) name
        :param description: (Optional) description
        :param healthCheck: (Optional) health-check state, "on" or "off"
        :param healthCheckTimeout: (Optional) max health-check response timeout, in seconds
        :param healthCheckInterval: (Optional) max interval between health checks, in seconds
        :param healthyThreshold: (Optional) number of successful checks to count as healthy
        :param unhealthyThreshold: (Optional) number of failed checks to count as unhealthy
        :param healthCheckIp: (Optional) health-check IP
        :param serverGroupId: (Optional) server group ID
        """
        # Copy every constructor argument onto the instance verbatim;
        # vars() here is the local namespace, i.e. exactly the parameters.
        init_args = dict(vars())
        init_args.pop('self')
        for field_name, field_value in init_args.items():
            setattr(self, field_name, field_value)
|
[
"a1090693441@163.com"
] |
a1090693441@163.com
|
d089134e584d9b0d118d8a1e547907c28db88b65
|
9bc9885e9500083afc2cd6be4ff93ee2eb4fbfbb
|
/neuropower/apps/designtoolbox/migrations/0016_auto_20160907_1914.py
|
5dbb860d6d601d9fe11f927ee47970a6189b6b04
|
[
"MIT"
] |
permissive
|
jokedurnez/neuropower
|
50297af01bef55fe2c01355f038a9d184cde493d
|
ed8c1cf29d447b41dfbfbc7a8345443454e62a96
|
refs/heads/master
| 2021-01-15T08:36:45.191330
| 2016-11-20T00:56:30
| 2016-11-20T00:56:30
| 51,338,446
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-09-07 19:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: set new defaults on the design-toolbox
    optimization parameters (weights W1/W2, cycle counts)."""

    dependencies = [
        ('designtoolbox', '0015_auto_20160905_1717'),
    ]

    operations = [
        migrations.AlterField(
            model_name='designmodel',
            name='W1',
            field=models.FloatField(default=0),
        ),
        migrations.AlterField(
            model_name='designmodel',
            name='W2',
            field=models.FloatField(default=0.5),
        ),
        migrations.AlterField(
            model_name='designmodel',
            name='cycles',
            field=models.IntegerField(default=100),
        ),
        migrations.AlterField(
            model_name='designmodel',
            name='preruncycles',
            field=models.IntegerField(default=10),
        ),
    ]
|
[
"joke.durnez@gmail.com"
] |
joke.durnez@gmail.com
|
36baeb280fe445880f582412b5f140997661f413
|
f4f181f2c970a163801b4202fc8d6c92a4e8113d
|
/google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/addresses_utils.py
|
c24143684ff0332856f3e3da17ca890a539e8ee5
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
Sorsly/subtle
|
7732a6cb910f5e2f4eed1ac0d3b5979001582340
|
718e79a3e04f1f57f39b6ebe90dec9e028e88d40
|
refs/heads/master
| 2021-05-24T01:21:39.218495
| 2017-10-28T01:33:58
| 2017-10-28T01:33:58
| 83,103,372
| 0
| 1
|
MIT
| 2020-07-25T11:21:05
| 2017-02-25T03:33:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,225
|
py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and functions for addresses."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import name_generator
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import flags as compute_flags
class AddressesMutator(base_classes.BaseAsyncMutator):
    """Base class for modifying addresses."""

    @property
    def service(self):
        # global_request is set by CreateRequests before the base class
        # consults this property; global and regional addresses live on
        # different API services.
        if self.global_request:
            return self.compute.globalAddresses
        else:
            return self.compute.addresses

    @property
    def resource_type(self):
        return 'addresses'

    @property
    def method(self):
        # API verb issued for each request built in CreateRequests.
        return 'Insert'

    def GetAddress(self, args, address, address_ref):
        """Build the Address message for one (address, reference) pair.

        *address* may be None when the caller supplied only names.
        """
        return self.messages.Address(
            address=address,
            description=args.description,
            name=address_ref.Name())

    def CreateRequests(self, args):
        """Overrides."""
        names, addresses = self._GetNamesAndAddresses(args)
        if not args.name:
            # Backfill generated names so resource resolution below sees them.
            args.name = names

        address_refs = self.ADDRESSES_ARG.ResolveAsResource(
            args, self.resources,
            scope_lister=compute_flags.GetDefaultScopeLister(
                self.compute_client, self.project))
        # A resolved reference without a region is a global address; the
        # first reference decides the scope for the whole batch.
        self.global_request = getattr(address_refs[0], 'region', None) is None

        requests = []
        for address, address_ref in zip(addresses, address_refs):
            address_msg = self.GetAddress(
                args,
                address,
                address_ref)
            if self.global_request:
                requests.append(self.messages.ComputeGlobalAddressesInsertRequest(
                    address=address_msg, project=address_ref.project))
            else:
                requests.append(self.messages.ComputeAddressesInsertRequest(
                    address=address_msg,
                    region=address_ref.region,
                    project=address_ref.project))
        return requests

    def _GetNamesAndAddresses(self, args):
        """Returns names and addresses provided in args.

        Generates random names when only addresses were given and pads
        the address list with None when only names were given; raises
        ToolException when neither is supplied or the counts differ.
        """
        if not args.addresses and not args.name:
            raise exceptions.ToolException(
                'At least one name or address must be provided.')

        if args.name:
            names = args.name
        else:
            # No names given, so we must have addresses (validated above);
            # generate a random name for each.
            names = [name_generator.GenerateRandomName() for _ in args.addresses]

        if args.addresses:
            addresses = args.addresses
        else:
            # No addresses given, so we must have names; pad with None so
            # zip() in CreateRequests pairs them up.
            addresses = [None] * len(args.name)

        if len(addresses) != len(names):
            raise exceptions.ToolException(
                'If providing both, you must specify the same number of names as '
                'addresses.')

        return names, addresses
|
[
"han300@purdue.edu"
] |
han300@purdue.edu
|
10ef50de6e155b9bb542e5c845172fff8a2bb9e6
|
16e26614611ae87de81388b435d88b142ca6189e
|
/pywind/decc/Report.py
|
d66f3fe5a42edfea4de68937f5228223d475ef93
|
[] |
no_license
|
tomwadley/pywind
|
d1d36007b0196730cba1389ef7940dd0ccabe5df
|
0d86ff1c9a67b2f446e62c1471257e38bdc1d03c
|
refs/heads/master
| 2021-04-09T15:41:49.823115
| 2013-06-26T09:18:24
| 2013-06-26T09:18:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,726
|
py
|
from cookielib import CookieJar
import csv
from datetime import datetime
import urllib2
from pywind.decc.geo import osGridToLatLong, LatLon
def field_to_attr(fld):
    """Convert a DECC CSV column heading into a python attribute name.

    Lower-cases the heading and replaces spaces, hyphens and forward
    slashes with underscores.
    """
    normalized = fld.lower()
    return normalized.replace(' ', '_').replace('-', '_').replace('/', '_')
class DeccRecord(object):
    """One row of the DECC monthly extract, with typed attributes.

    Each CSV column in FIELDS becomes an attribute (named via
    field_to_attr). Boolean, date and coordinate columns are coerced,
    the installed capacity is scaled x1000 (MW -> kW, presumably), and
    the OS grid reference is converted to a WGS84 lat/lon.

    NOTE: Python 2 code (print statement in Dump).
    """

    # Column headings, in the exact order they appear in the CSV row.
    FIELDS = ['Reference',
              'NFFO/SRO/NI-NFFO/Non-NFFO',
              'General Technology',
              'Technology Type',
              'Section 36',
              'Contractor (/Applicant)',
              'Site Name',
              'Installed Capacity (Elec)',
              'CHP',
              'OffShore Wind Round',
              'Address 1',
              'Address 2',
              'Address 3',
              'Address 4',
              'Town',
              'County',
              'District',
              'Region',
              'Country',
              'X Coord',
              'Y Coord',
              'Pre-consent',
              'Post-consent',
              'Application Submitted',
              'Application Determined',
              'Construction Date',
              'Planning Officer Recommendation',
              'Appeal Determined',
              'Appeal Ref Number',
              'Date on which generation commenced',
              'Green Belt',
              'National Park',
              'AONB',
              'Heritage Coast',
              'Special Landscape Area',
              'Employment Use',
              'Natural Environment',
              'Other Land Use',
              'Built Heritage/ Archaeology',
              'Project Specific',
              'Relevant Supporting Details',
              'Developer Last Contacted',
              'LPA / CC Last Contacted',
              'LPA Name',
              'Record Last Updated'
              ]
    # Columns parsed as %Y-%m-%d dates; empty string becomes None.
    DATE_FIELDS = ['record_last_updated',
                   'application_submitted',
                   'application_determined',
                   'appeal_determined'
                   ]
    # Columns coerced to bool: the literal 'false' -> False, anything
    # else -> True.
    BOOLEAN_FIELDS = ['section_36',
                      'green_belt',
                      'national_park',
                      'aonb',
                      'heritage_coast',
                      'special_landscape_area',
                      'employment_use',
                      'natural_environment',
                      'other_land_use',
                      'built_heritage__archaeology',
                      'project_specific'
                      ]
    # Grid coordinates; despite the name these are coerced to float,
    # with '' treated as 0.
    INT_FIELDS = ['x_coord', 'y_coord']

    def __init__(self, row):
        """Populate attributes from *row*, a CSV row matching FIELDS order."""
        # Raw string assignment for every column.
        for i in range(len(self.FIELDS)):
            attr = field_to_attr(self.FIELDS[i])
            setattr(self, attr, row[i])
        # Coerce booleans in place.
        for f in self.BOOLEAN_FIELDS:
            val = getattr(self, f, None)
            if val is None:
                continue
            setattr(self, f, False if val.lower() == 'false' else True)
        # Coerce dates in place; blank cells become None.
        for f in self.DATE_FIELDS:
            val = getattr(self, f, None)
            if val is None:
                continue
            if val == '':
                setattr(self, f, None)
            else:
                setattr(self, f, datetime.strptime(val, "%Y-%m-%d").date())
        # Coerce coordinates to float; blank cells become 0.
        for f in self.INT_FIELDS:
            val = getattr(self, f, 0)
            if val == '':
                val = 0
            setattr(self, f, float(val))
        # Strip thousands separators and scale capacity x1000; also
        # expose it under the simpler 'capacity' alias.
        mw_capacity = getattr(self, 'installed_capacity_(elec)', 0)
        mw_capacity = float(mw_capacity.replace(',', ''))
        setattr(self, 'installed_capacity_(elec)', mw_capacity * 1000)
        setattr(self, 'capacity', getattr(self, 'installed_capacity_(elec)'))
        # Convert x,y to lat/lon
        # NOTE(review): x is cast to int but y is passed as float —
        # presumably osGridToLatLong accepts both; confirm.
        latlon = osGridToLatLong(int(self.x_coord), self.y_coord)
        latlon.convert(LatLon.WGS84)
        setattr(self, 'lat', latlon.lat)
        setattr(self, 'lon', latlon.lon)

    def Dump(self):
        """Print every field and its parsed value (debugging aid)."""
        for f in self.FIELDS:
            print "%-30s: %s" % (f, getattr(self, field_to_attr(f), ''))
class MonthlyExtract(object):
    """Downloads the DECC monthly extract CSV over HTTPS and parses each
    data row into a DeccRecord.

    NOTE: Python 2 code (cookielib/urllib2).
    """

    URL = "https://restats.decc.gov.uk/app/reporting/decc/monthlyextract/style/csv/csvwhich/reporting.decc.monthlyextract"

    def __init__(self):
        # HTTPS opener with cookie support; records accumulate in
        # self.records across get_data() calls.
        self.cookieJar = CookieJar()
        cookie_handler = urllib2.HTTPCookieProcessor(self.cookieJar)
        httpsHandler = urllib2.HTTPSHandler(debuglevel = 0)
        self.opener = urllib2.build_opener(cookie_handler, httpsHandler)
        self.records = []

    def __len__(self):
        # Number of records fetched so far.
        return len(self.records)

    def get_data(self):
        """Fetch URL and parse the CSV into self.records.

        Returns False on a non-200 response, True otherwise. Rows whose
        first cell is the 'Reference' header are skipped.
        """
        resp = self.opener.open(self.URL)
        if resp.code != 200:
            return False
        reader = csv.reader(resp)
        for row in reader:
            if row[0] == 'Reference':
                continue
            d = DeccRecord(row)
            self.records.append(d)
        return True
|
[
"zathrasorama@gmail.com"
] |
zathrasorama@gmail.com
|
47005a3669df2d29e09979c2bfd2bb18fede9e59
|
b7e52aeabebf7448e31723d406755809cac63099
|
/source/calc_fsun_tree/SConstruct
|
9e270b942ba10dbe21a51f6c85d05cb22f763ba5
|
[
"BSD-3-Clause"
] |
permissive
|
bucricket/projectMASviirs
|
df31af86e024499ff87d2c2b707e3b9d24813f7c
|
705abc89505122351f0ef78e0edb950b7e3b7f48
|
refs/heads/master
| 2021-01-01T18:31:16.748864
| 2018-05-30T15:14:07
| 2018-05-30T15:14:07
| 98,354,619
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
#!python
# SCons build script: compiles make_csv.f90 with gfortran and installs
# the resulting executable into the prefix's bin directory.
# (AddOption, Environment, GetOption are SCons globals.)
import os
import platform
import subprocess

# Optional --prefix=DIR command-line installation prefix.
AddOption('--prefix',
          dest='prefix',
          type='string',
          nargs=1,
          action='store',
          metavar='DIR',
          help='installation prefix')
env = Environment(PREFIX = GetOption('prefix'))
# Paths are derived from the PREFIX environment variable (e.g. a conda
# build prefix), not from the --prefix option above.
prefix = os.environ.get('PREFIX')
base1 = os.path.abspath(os.path.join(prefix,os.pardir))
base = os.path.join(base1,'work')
sourcePath = os.path.join(base,'source')
binPath = os.path.join(prefix,'bin')
# Comment lines start with the # symbol
# The following sets up an Compile Environment Object with gfortran as the linker.
# NOTE(review): this reassignment discards the Environment created above
# together with its PREFIX construction variable — verify that is intended.
env = Environment(LINK='gfortran')
env.Append(F90FLAGS = ['-ffree-line-length-512'])
# The next line of code is an array of the source files names used in the program.
# The next line is the actual code that links the executable. env.Program is generates an executable.
make_csv = env.Program(target='make_csv', source= 'make_csv.f90')
env.Install(binPath, make_csv)
# 'scons install' copies make_csv into binPath.
env.Alias('install', binPath)
|
[
"bucricket@gmail.com"
] |
bucricket@gmail.com
|
|
030faf212e0c96085fe19ef5907653e0f6de769f
|
cc6e36ce306a46c1accc3e979362de34b6063b7e
|
/game/management/commands/import_games.py
|
1be5196a35dfa7b39b0710727bda8fd09f034eca
|
[] |
no_license
|
bartromgens/petanque-stats-server
|
d51995e2b4d288a0a99563347c3bf3db863918bf
|
9f7e48a7670b1c2c89f1bfcb2ac5ed8c8e9a7fe0
|
refs/heads/master
| 2020-03-22T19:23:18.230361
| 2018-07-29T00:46:02
| 2018-07-29T00:46:02
| 140,524,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,058
|
py
|
import json
import uuid
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from django.db import transaction
from game.models import Game, Team, ScoreTeam, Player
class Command(BaseCommand):
    """Management command: import players and games from JSON fixtures."""

    help = 'Import scores from a custom format'
    # JSON fixture files read by the command.
    players_filepath = 'data/players.json'
    games_filepath = 'data/games.json'

    @transaction.atomic
    def handle(self, *args, **options):
        """Entry point: create a user per player, then import all games.

        Runs inside a single transaction, so any failure rolls back the
        whole import.
        """
        print('BEGIN: import games')
        # Command.create_admin()
        Command.create_users()
        games = Command.create_games()
        for game in games:
            print(game)
        print('END: import games')

    @staticmethod
    def create_admin():
        """Create a default superuser (disabled above; dev credentials only)."""
        User.objects.create_superuser(
            username='admin',
            email='admin@test.com',
            password='admin'
        )

    @staticmethod
    def create_users():
        """Create one User per player name, with a random throwaway password."""
        with open(Command.players_filepath, 'r') as players_file:
            players_json = json.load(players_file)
            for username in players_json['players']:
                User.objects.create_user(
                    username=username,
                    email=username + '@test.com',
                    password=uuid.uuid4().hex[:10]
                )

    @staticmethod
    def create_games():
        """Load games.json and create one Game per entry; returns the games."""
        games = []
        with open(Command.games_filepath, 'r') as players_file:
            games_json = json.load(players_file)
            for game_data in games_json['games']:
                games.append(Command.create_game(game_data))
        return games

    @staticmethod
    def create_game(game_data):
        """Create a Game, its teams and per-team scores from one JSON entry."""
        game = Game.objects.create(max_score=game_data['max_score'])
        for score in game_data['scores']:
            # Resolve player names to ids, then reuse or create the team.
            team_players_ids = []
            for name in score['players']:
                team_players_ids.append(Player.get_by_name(name).id)
            team = Team.get_or_create_team(team_players_ids)
            game.teams.add(team)
            ScoreTeam.objects.create(team=team, game=game, score=score['score'])
        game.save()
        return game
|
[
"bart.romgens@gmail.com"
] |
bart.romgens@gmail.com
|
6d25cc07becb9e59f730a67748abcca1e17b92d4
|
770f7b7155c33d2f8c27846b93b9b73db45b2e2a
|
/gofedinfra/system/plugins/simpleetcdstorage/fakeartefactdriver.py
|
10293902c919cd5c9cb74e6cf1c783345335fd8e
|
[] |
no_license
|
gofed/infra
|
b0f6186486e8aa7c8c640411ee92d6648cbc77ec
|
2f402bbdf1e5fa7cb68262cc3408a2fc1436269f
|
refs/heads/master
| 2022-10-16T02:46:09.226939
| 2018-06-07T23:16:44
| 2018-06-08T11:31:37
| 48,703,326
| 1
| 5
| null | 2022-10-11T11:17:16
| 2015-12-28T17:08:28
|
Python
|
UTF-8
|
Python
| false
| false
| 346
|
py
|
from .artefactdriver import ArtefactDriver
class FakeArtefactDriver(ArtefactDriver):
def __init__(self, artefact):
ArtefactDriver.__init__(self, artefact)
self.data = {}
def store(self, input):
key = self._generateKey(input)
self.data[key] = input
def retrieve(self, data):
key = self._generateKey(data)
return self.data[key]
|
[
"jchaloup@redhat.com"
] |
jchaloup@redhat.com
|
742e69c1a22297de8f0a8cd58cecab3389d6f888
|
a281d09ed91914b134028c3a9f11f0beb69a9089
|
/tests/integration/docusaurus/connecting_to_your_data/cloud/gcs/pandas/inferred_and_runtime_yaml_example.py
|
e7ed0539f4232487def269d75f60499c9e167e07
|
[
"Apache-2.0"
] |
permissive
|
CarstenFrommhold/great_expectations
|
4e67bbf43d21bc414f56d576704259a4eca283a5
|
23d61c5ed26689d6ff9cec647cc35712ad744559
|
refs/heads/develop
| 2023-01-08T10:01:12.074165
| 2022-11-29T18:50:18
| 2022-11-29T18:50:18
| 311,708,429
| 0
| 0
|
Apache-2.0
| 2020-11-10T15:52:05
| 2020-11-10T15:52:04
| null |
UTF-8
|
Python
| false
| false
| 4,209
|
py
|
from typing import List
# <snippet>
from ruamel import yaml
import great_expectations as ge
from great_expectations.core.batch import Batch, BatchRequest, RuntimeBatchRequest
# </snippet>
# <snippet>
context = ge.get_context()
# </snippet>
# <snippet>
datasource_yaml = rf"""
name: my_gcs_datasource
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
default_inferred_data_connector_name:
class_name: InferredAssetGCSDataConnector
bucket_or_name: <YOUR_GCS_BUCKET_HERE>
prefix: <BUCKET_PATH_TO_DATA>
default_regex:
pattern: (.*)\.csv
group_names:
- data_asset_name
"""
# </snippet>
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_yaml = datasource_yaml.replace("<YOUR_GCS_BUCKET_HERE>", "test_docs_data")
datasource_yaml = datasource_yaml.replace(
"<BUCKET_PATH_TO_DATA>", "data/taxi_yellow_tripdata_samples"
)
context.test_yaml_config(datasource_yaml)
# <snippet>
context.add_datasource(**yaml.load(datasource_yaml))
# </snippet>
# Here is a RuntimeBatchRequest using a path to a single CSV file
# <snippet>
batch_request = RuntimeBatchRequest(
datasource_name="my_gcs_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="<YOUR_MEANGINGFUL_NAME>", # this can be anything that identifies this data_asset for you
runtime_parameters={"path": "<PATH_TO_YOUR_DATA_HERE>"}, # Add your GCS path here.
batch_identifiers={"default_identifier_name": "default_identifier"},
)
# </snippet>
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the BatchRequest above.
batch_request.runtime_parameters[
"path"
] = f"gs://test_docs_data/data/taxi_yellow_tripdata_samples/yellow_tripdata_sample_2019-01.csv"
# <snippet>
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# </snippet>
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
batch_list: List[Batch] = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 1
batch: Batch = batch_list[0]
assert batch.data.dataframe.shape[0] == 10000
# Here is a BatchRequest naming a data_asset
# <snippet>
batch_request = BatchRequest(
datasource_name="my_gcs_datasource",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
)
# </snippet>
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name directly in the BatchRequest above.
batch_request.data_asset_name = (
"data/taxi_yellow_tripdata_samples/yellow_tripdata_sample_2019-01"
)
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
assert [ds["name"] for ds in context.list_datasources()] == ["my_gcs_datasource"]
assert set(
context.get_available_data_asset_names()["my_gcs_datasource"][
"default_inferred_data_connector_name"
]
) == {
"data/taxi_yellow_tripdata_samples/yellow_tripdata_sample_2019-01",
"data/taxi_yellow_tripdata_samples/yellow_tripdata_sample_2019-02",
"data/taxi_yellow_tripdata_samples/yellow_tripdata_sample_2019-03",
}
batch_list: List[Batch] = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 1
batch: Batch = batch_list[0]
assert batch.data.dataframe.shape[0] == 10000
|
[
"noreply@github.com"
] |
CarstenFrommhold.noreply@github.com
|
a49e2981e3bf1e5622c75bb54165b0f55cecfe87
|
31c94ea00f0f6673f161a21a529f2272e7460a34
|
/bindapi/routerApi.py
|
4ea16209cd1e7101e783bcfeaa0d6ef1853e83b0
|
[] |
no_license
|
xgocn/bindapi
|
0d3e51f696a9d3ec5dde4e05d1c2d5eb2fe52f5a
|
343f07176de43c3e5ffc9b26c479c47c289fdc0e
|
refs/heads/master
| 2023-03-15T08:01:22.550448
| 2018-05-09T06:19:22
| 2018-05-09T06:19:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
# -*- coding: utf-8 -*-
# author: kiven
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
from bind.views import DomainViewSet, RecordViewSet, AllDomainViewSet, XfrAclViewSet
router.register(r'domains', DomainViewSet)
router.register(r'records', RecordViewSet)
router.register(r'xfracls', XfrAclViewSet)
router.register(r'alldomains', AllDomainViewSet, base_name='alldomains')
from analyze.views import DomainNodeViewSet, DomainStatusViewSet
router.register(r'domainnodes', DomainNodeViewSet)
router.register(r'domainstatus', DomainStatusViewSet)
|
[
"kevin@126.com"
] |
kevin@126.com
|
17b6a63f5fd62343b7a3cb8a859ed7ef5cd184f7
|
521648e4e12366760da7baff15d35201e0b19a5e
|
/django_ansible/shell.py
|
5dc1d4613af2d30156968e00bd67fc7c89c10b8e
|
[] |
no_license
|
sashgorokhov/django-ansible
|
b54f596f7d50d239474eb2d4fd8e85c0da21f959
|
ad32255b7c87bcada1bd6c8aa250c1ec52c8cd49
|
refs/heads/master
| 2021-01-22T07:52:55.457727
| 2017-02-13T19:46:23
| 2017-02-13T19:46:23
| 81,864,171
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
import subprocess
import logging
logger = logging.getLogger(__name__)
def _try_decode(b):
try:
return b.decode()
except:
return b
def run(executable, args, env=None, cwd=None, **kwargs):
"""
:param kwargs: Additional arguments passed to subprocess.run function
:rtype: subprocess.CompletedProcess
"""
completed = subprocess.run(
args=args,
executable=executable,
env=env,
cwd=cwd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs
)
logger.info('$ %s %s (env: %s)', executable, str(args), str(env))
if completed.returncode != 0:
logger.warning('Exited with code %s', completed.returncode)
if completed.stderr:
logger.warning(_try_decode(completed.stderr))
if completed.stdout:
logger.debug(_try_decode(completed.stdout))
return completed
|
[
"sashgorokhov@gmail.com"
] |
sashgorokhov@gmail.com
|
c31338954ced6e76da2274aa4b6340be2e11225e
|
aa8e6259f33bdcfdf21434da5185f31aa6927195
|
/gtf_admm_gird_v1.py
|
ded9bf78405dafb35b2100bc464273be8ad8622f
|
[] |
no_license
|
Ryanshuai/graph_trend_filtering_py
|
b32448cfdc4c50a9dfde144abe73e878891f26de
|
243969bf7dd97e483693ac88e45ab2192cd4edbf
|
refs/heads/master
| 2020-11-27T07:13:22.116640
| 2019-12-24T20:56:05
| 2019-12-24T20:56:05
| 229,349,019
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,927
|
py
|
import numpy as np
from numpy.linalg import norm
from grid_system import grid_system_2d, grid_system_3d
from get_Delta_grid import get_Delta_grid
from soft_thresh import soft_thresh
from construct_O import construct_O
def gtf_admm_grid_v1(y: np.array, k, lamb, rho, max_iter=1000):
y_size = y.size
y_shape = y.shape
y_dim = y.ndim
if y_dim == 2:
D = get_Delta_grid(y_shape, 'gtf2d', 0)
elif y_dim == 3:
D = get_Delta_grid(y_shape, 'gtf3d', 0)
else:
raise AssertionError('Grids with dimension > 3 not supported')
O = construct_O(D, k)
if k % 2 == 0:
O = O[:O.shape[1], :]
y = y.reshape((y_size, 1), order='F')
x = y.copy()
z = np.zeros_like(y, dtype=np.float64)
u = z.copy()
for i in range(max_iter):
if y_dim == 2:
b = (O.T @ (rho * z - u) + y).reshape(y_shape, order='F')
x = grid_system_2d(b, k + 1, rho)
elif y_dim == 3:
b = (O.T @ (rho * z - u) + y).reshape(y_shape, order='F')
x = grid_system_3d(b, k + 1, rho)
x = x.reshape((y_size, 1), order='F')
Ox = O @ x
z_new = soft_thresh(Ox + u / rho, lamb / rho)
s = rho * norm(O.T @ (z_new - z))
z = z_new
u += rho * (Ox - z)
r = norm(Ox - z)
tol_abs = 1e-5
tol_rel = 1e-4
eps_pri = np.sqrt(y.size) * tol_abs + tol_rel * max(norm(Ox), norm(z))
eps_dual = np.sqrt(y.size) * tol_abs + tol_rel * norm(O.T @ u)
if r < eps_pri and s < eps_dual:
print('converged.')
break
if i % 1 == 0:
print('{} [r, s]={}, {}, [eps_pri, eps_dual]={},{}'.format(i, r, s, eps_pri, eps_dual))
tau = 2
if r > 10 * s:
rho *= tau
elif s > 10 * s:
rho /= tau
else: # no break
print('Reached maxiter.')
return x.reshape(y_shape, order='F')
|
[
"1018718155@qq.com"
] |
1018718155@qq.com
|
28dccbb9398db07f45d327d9f7177a7907e88734
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/python/paddle/distributed/launch/controllers/ipu_controller.py
|
bf2c5f34b3bdf946f0f16b99225771bd139022e5
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 6,679
|
py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
from paddle.distributed.launch.job.container import Container
from .collective import CollectiveController, ControleMode
class IPUController(CollectiveController):
@classmethod
def enable(cls, ctx):
if ctx.args.training_script == "ipu":
ctx.logger.debug(f"{cls.__name__} enabled")
ctx.args.run_mode = ControleMode.IPU
return True
else:
return False
def parse_ipu_args(self, args_list):
parser = argparse.ArgumentParser()
parser.add_argument(
"--hosts", type=str, help="The hosts for IPU distributd training."
)
parser.add_argument(
"--nproc_per_host",
type=int,
help="The number of processes launched per host.",
)
parser.add_argument(
"--ipus_per_replica",
type=int,
help="The number of IPUs requested per replica.",
)
parser.add_argument(
"--ipu_partition",
type=str,
help="The partition name of IPU devices.",
)
parser.add_argument(
"--vipu_server", type=str, help="The ip of the IPU device manager."
)
parser.add_argument(
"training_script",
type=str,
help="The full path to the IPU distributed training program/script to be launched in parallel. e.g., ``training.py``.",
)
parser.add_argument('training_script_args', nargs=argparse.REMAINDER)
return parser.parse_args(args_list)
def replace_training_script(self):
# IPU distributed computing is based on PopRun which is a wrapper of MPI.
self.ctx.args.training_script = "poprun"
poprun_args = self.parse_ipu_args(self.ctx.args.training_script_args)
num_ipus = int(self.ctx.args.devices)
# The number of replicas for data parallel
assert (
num_ipus % poprun_args.ipus_per_replica
) == 0, "The number of IPUs:{} mod the number of IPUs per replica:{} must == 0".format(
num_ipus, poprun_args.ipus_per_replica
)
num_replicas = num_ipus // poprun_args.ipus_per_replica
self.ctx.logger.info(f"The number of total replicas is {num_replicas}.")
# The number of processes
num_nodes = len(poprun_args.hosts.split(','))
num_procs = num_nodes * poprun_args.nproc_per_host
self.ctx.logger.info(f"The number of total processes is {num_procs}.")
assert (
num_replicas % num_procs
) == 0, "The number of replicas:{} mod the number of processes:{} must == 0".format(
num_replicas, num_procs
)
# hosts and endpoints
hosts = poprun_args.hosts.replace(' ', '').split(',')
endpoints = [x + ":8090" for x in hosts]
# args for poprun
poprun_command = []
poprun_command.append(f'--num-instances={num_procs}')
poprun_command.append(f'--num-replicas={num_replicas}')
poprun_command.append(
f'--ipus-per-replica={poprun_args.ipus_per_replica}'
)
poprun_command.append('--host={}'.format(','.join(hosts)))
poprun_command.append(f'--vipu-partition={poprun_args.ipu_partition}')
poprun_command.append(f'--vipu-server-host={poprun_args.vipu_server}')
poprun_command.extend(
[
'--update-partition=no',
'--vipu-server-timeout=120',
'--print-topology=yes',
'--numa-aware=yes',
]
)
# global envs
global_envs = '--mpi-local-args=\''
log_level = os.getenv('POPART_LOG_LEVEL', None)
if log_level:
global_envs += f'-x POPART_LOG_LEVEL={log_level} '
global_envs += (
'-x PADDLE_TRAINERS_NUM={} -x PADDLE_TRAINER_ENDPOINTS={}'.format(
num_procs, ','.join(endpoints)
)
)
global_envs += '\''
poprun_command.append(global_envs)
# local envs
for idx in range(num_procs):
cur_endpoint = endpoints[idx // poprun_args.nproc_per_host]
rank_in_node = idx % poprun_args.nproc_per_host
poprun_command.append(
'--instance-mpi-local-args={}:\"-x PADDLE_TRAINER_ID={} -x PADDLE_CURRENT_ENDPOINT={} -x PADDLE_RANK_IN_NODE={}\"'.format(
idx, idx, cur_endpoint, rank_in_node
)
)
# executor
poprun_command.append(sys.executable)
# script and script args
poprun_command.append(poprun_args.training_script)
poprun_command.extend(poprun_args.training_script_args)
# for debug
print("----------- PopRun Command -----------")
print("poprun \\")
for i in range(len(poprun_command) - 1):
print("%s \\" % (poprun_command[i]))
print("%s" % (poprun_command[len(poprun_command) - 1]))
print("---------------------------------------")
# replace training_script_args
self.ctx.args.training_script_args = poprun_command
def _get_entrypoint(self):
entrypoint = [self.ctx.args.training_script]
entrypoint.extend(self.ctx.args.training_script_args)
entrypoint = [" ".join(entrypoint)]
return entrypoint
def new_container(
self, entrypoint=None, envs={}, use_ctx_env=True, out=None, err=None
):
c = Container(
entrypoint=(entrypoint or self._get_entrypoint()),
env=(self.ctx.get_envs() if use_ctx_env else {}),
)
c.outfile, c.errfile = self._get_out_err_file(out, err)
c.update_env(envs)
# Need subprocess.Popen(shell=True) for PopRun command
c.shell = True
return c
def run(self):
# Replace the training script with the PopRun command
self.replace_training_script()
self.build_job()
self.build_pod()
self.deploy_pod()
self.watch()
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
e2f068cbe30c6a0e2ced7c56d424860a07838c8b
|
d0cd3064e1b24542e02518961fd6643af56738f0
|
/ticha-django-site/handwritten_texts/views.py
|
96faec09e23c5252e504753c7de6a6f472a2b205
|
[] |
no_license
|
zhanpengwang888/Docker-Test
|
126bd74301b7550de753eb7539795e20ace285c5
|
2f0eefb684622d6eead3977697e8ccf4761ba1be
|
refs/heads/master
| 2022-12-15T11:49:46.899365
| 2017-09-29T21:02:05
| 2017-09-29T21:02:05
| 104,515,096
| 0
| 0
| null | 2022-12-07T23:56:40
| 2017-09-22T20:16:17
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,899
|
py
|
from django.shortcuts import render
from .models import HandwrittenText
from django.views.generic import ListView
class HandwrittenListView(ListView):
model = HandwrittenText
template_name = 'handwritten_texts/list.html'
EN_TO_ES = {
'title': 'título', "language": "idioma", "document_type": "tipo_del_documento",
"material_type": "material_type", "archive": "archivo", "collection": "colección",
"call_number": "número_de_etiqueta", "page": "páginas", "date_digitized": "date_digitized",
"year": "year", "town_modern_official": "pueblo", "primary_parties": "personajes_principales",
"slug": "slug", "town_short": "town_short", "date": "fecha", "has_translation": "has_translation",
"transcription": "transcription", "scribe": "escribano", "is_translation": "is_translation",
"witnesses": "testigos", "acknowledgements": "agradecimientos",
"permission_file": "permission_file", "percent_needs_review": "percent_needs_review",
"requester_project": "requester_project", "timeline_text": "timeline_spanish_text",
"timeline_headline": "timeline_spanish_headline"
}
def handwritten_text_detail_view(request, slug):
"""Custom view to supply the HandwrittenText detail template with its
fields in the proper language.
"""
man = HandwrittenText.objects.get(slug=slug)
translated_man = {}
for en_key, es_key in EN_TO_ES.items():
if request.LANGUAGE_CODE == 'es':
try:
translated_man[en_key] = getattr(man, es_key)
except AttributeError:
translated_man[en_key] = getattr(man, en_key)
else:
translated_man[en_key] = getattr(man, en_key)
context = {'man': translated_man, 'omeka_id': man.omeka_id}
return render(request, 'handwritten_texts/detail.html', context)
def redirect_view(request):
return render(request, 'handwritten_texts/redirect.html')
|
[
"zhanpengwang@Zhanpengs-MacBook-Air.local"
] |
zhanpengwang@Zhanpengs-MacBook-Air.local
|
0a4496a02194dde02d89f94e065bb5e8d08c414d
|
b77cc1448ae2c68589c5ee24e1a0b1e53499e606
|
/leave/migrations/0011_auto_20170216_1404.py
|
583d88c706e6ecb11c94de53c4d77eac4b435c0f
|
[] |
no_license
|
PregTech-c/Hrp_system
|
a5514cf6b4c778bf7cc58e8a6e8120ac7048a0a7
|
11d8dd3221497c536dd7df9028b9991632055b21
|
refs/heads/master
| 2022-10-09T07:54:49.538270
| 2018-08-21T11:12:04
| 2018-08-21T11:12:04
| 145,424,954
| 1
| 1
| null | 2022-10-01T09:48:53
| 2018-08-20T13:58:31
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 421
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-16 11:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('leave', '0010_auto_20170215_1712'),
]
operations = [
migrations.RenameModel(
old_name='LeaveRequestMessage',
new_name='LeaveRequestApplication',
),
]
|
[
"imugabi64@yahoo.com"
] |
imugabi64@yahoo.com
|
7b88efcd16638abbf9a7b7bf5405d75f01442fa0
|
4ef12965654c4bc9d6a0635ecf8188ecf2defed8
|
/my_new_application_1046/wsgi.py
|
3cb6a828d03839b76f908d22de5a1756e8c484a1
|
[] |
no_license
|
crowdbotics-apps/my-new-application-1046
|
2d30662ae59e1675d0fee78789898852a1b81b45
|
e3ee98e6318bac0a61b3519d148b4b6b80c9c6f5
|
refs/heads/master
| 2022-12-21T22:11:11.515569
| 2019-02-23T00:42:41
| 2019-02-23T00:42:41
| 172,153,638
| 0
| 0
| null | 2022-12-08T01:40:51
| 2019-02-23T00:41:51
|
Python
|
UTF-8
|
Python
| false
| false
| 424
|
py
|
"""
WSGI config for my_new_application_1046 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_new_application_1046.settings")
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
cf5d0a7771e323b24bd3ee042f85b1bcbe5f004f
|
2233f520493f64c6070dd3e77722e53a7dd738e8
|
/day5/my_devices_na.py
|
2721f252d62d9ee537165a0fe29a355f3aca4d31
|
[
"Apache-2.0"
] |
permissive
|
mpjeffin/pynet-ons-oct17
|
690bb31600b8ef5131439bb25ddce35b4855ba6a
|
d0daf9c250f79bc34b3b8b06b67004f56ef834a2
|
refs/heads/master
| 2021-09-07T00:00:02.234456
| 2018-02-13T19:58:11
| 2018-02-13T19:58:11
| 125,467,721
| 1
| 0
| null | 2018-03-16T05:26:10
| 2018-03-16T05:26:10
| null |
UTF-8
|
Python
| false
| false
| 1,517
|
py
|
"""
pynet-rtr1 (Cisco IOS) 184.105.247.70
pynet-rtr2 (Cisco IOS) 184.105.247.71
pynet-sw1 (Arista EOS) 184.105.247.72
pynet-sw2 (Arista EOS) 184.105.247.73
pynet-sw3 (Arista EOS) 184.105.247.74
pynet-sw4 (Arista EOS) 184.105.247.75
juniper-srx 184.105.247.76
"""
from getpass import getpass
password = getpass("Enter standard password: ")
cisco_rtr1 = dict(
hostname='184.105.247.70',
device_type='ios',
username='pyclass',
password=password,
optional_args = {}
)
cisco_rtr2 = dict(
hostname='184.105.247.71',
device_type='ios',
username='pyclass',
password=password,
optional_args = {}
)
arista_sw1 = dict(
hostname='184.105.247.72',
device_type='eos',
username='pyclass',
password=password,
optional_args = {}
)
arista_sw2 = dict(
hostname='184.105.247.73',
device_type='eos',
username='pyclass',
password=password,
optional_args = {}
)
juniper_srx = dict(
hostname='184.105.247.76',
device_type='junos',
username='pyclass',
password=password,
optional_args = {}
)
juniper1 = dict(
hostname='juniper1.twb-tech.com',
device_type='junos',
username='pyclass',
password=password,
optional_args = {}
)
juniper2 = dict(
hostname='juniper2.twb-tech.com',
device_type='junos',
username='pyclass',
password=password,
optional_args = {}
)
device_list = [
cisco_rtr1,
cisco_rtr2,
arista_sw1,
arista_sw2,
juniper_srx,
]
|
[
"ktbyers@twb-tech.com"
] |
ktbyers@twb-tech.com
|
438dfdf0b19a083bd9b1157b53d2919d688c2a8a
|
958b0471c52eff93415216cdd1a2b2ad3947a89b
|
/blueoil/templates/lmnet/object_detection.tpl.py
|
cf5170303d51745396007741b7ffa0160df7ae0d
|
[
"Apache-2.0"
] |
permissive
|
fumihwh/blueoil
|
4deb606e334b8456e7ace41e3f091ad6dc41afb6
|
acb5a270f201f34fe5a5b27a4b395d9c3a838b27
|
refs/heads/master
| 2020-04-01T22:29:27.525697
| 2018-10-18T09:23:37
| 2018-10-18T09:23:37
| 153,711,347
| 1
| 0
| null | 2018-10-19T01:49:02
| 2018-10-19T01:49:02
| null |
UTF-8
|
Python
| false
| false
| 4,402
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from easydict import EasyDict
import tensorflow as tf
from lmnet.common import Tasks
from lmnet.networks.object_detection.{{network_module}} import {{network_class}}
from lmnet.datasets.{{dataset_module}} import {{dataset_class}}
from lmnet.data_processor import Sequence
from lmnet.pre_processor import (
ResizeWithGtBoxes,
DivideBy255,
)
from lmnet.post_processor import (
FormatYoloV2,
ExcludeLowScoreBox,
NMS,
)
from lmnet.data_augmentor import (
Brightness,
Color,
Contrast,
FlipLeftRight,
Hue,
SSDRandomCrop,
)
from lmnet.quantizations import (
binary_channel_wise_mean_scaling_quantizer,
linear_mid_tread_half_quantizer,
)
IS_DEBUG = False
NETWORK_CLASS = {{network_class}}
# TODO(wakisaka): should be hidden. generate dataset class on the fly.
DATASET_CLASS = type('DATASET_CLASS', ({{dataset_class}},), {{dataset_class_property}})
IMAGE_SIZE = {{image_size}}
BATCH_SIZE = {{batch_size}}
DATA_FORMAT = "NHWC"
TASK = Tasks.OBJECT_DETECTION
# In order to get instance property `classes`, instantiate DATASET_CLASS.
CLASSES = DATASET_CLASS(subset="train", batch_size=1).classes
MAX_EPOCHS = {{max_epochs}}
SAVE_STEPS = {{save_steps}}
TEST_STEPS = {{test_steps}}
SUMMARISE_STEPS = {{summarise_steps}}
# distributed training
IS_DISTRIBUTION = False
# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""
PRE_PROCESSOR = Sequence([
ResizeWithGtBoxes(size=IMAGE_SIZE),
DivideBy255()
])
anchors = [
(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892), (9.47112, 4.84053), (11.2364, 10.0071)
]
score_threshold = 0.05
nms_iou_threshold = 0.5
nms_max_output_size = 100
POST_PROCESSOR = Sequence([
FormatYoloV2(
image_size=IMAGE_SIZE,
classes=CLASSES,
anchors=anchors,
data_format=DATA_FORMAT,
),
ExcludeLowScoreBox(threshold=score_threshold),
NMS(iou_threshold=nms_iou_threshold, max_output_size=nms_max_output_size, classes=CLASSES,),
])
NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
# In the origianl yolov2 Paper, with a starting learning rate of 10−3, dividing it by 10 at 60 and 90 epochs.
# Train data num per epoch is 16551
step_per_epoch = int(16551 / BATCH_SIZE)
NETWORK.LEARNING_RATE_KWARGS = {
"values": [5e-4, 2e-2, 5e-3, 5e-4],
"boundaries": [step_per_epoch, step_per_epoch * 80, step_per_epoch * 120],
}
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ANCHORS = anchors
NETWORK.OBJECT_SCALE = 5.0
NETWORK.NO_OBJECT_SCALE = 1.0
NETWORK.CLASS_SCALE = 1.0
NETWORK.COORDINATE_SCALE = 1.0
NETWORK.LOSS_IOU_THRESHOLD = 0.6
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.SCORE_THRESHOLD = score_threshold
NETWORK.NMS_IOU_THRESHOLD = nms_iou_threshold
NETWORK.NMS_MAX_OUTPUT_SIZE = nms_max_output_size
NETWORK.SEEN_THRESHOLD = 8000
# quantize
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
'bit': 2,
'max_value': 2.0
}
NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}
NETWORK.QUANTIZE_FIRST_CONVOLUTION = True
NETWORK.QUANTIZE_LAST_CONVOLUTION = False
# dataset
DATASET = EasyDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
DATASET.AUGMENTOR = Sequence([
FlipLeftRight(is_bounding_box=True),
Brightness((0.75, 1.25)),
Color((0.75, 1.25)),
Contrast((0.75, 1.25)),
Hue((-10, 10)),
SSDRandomCrop(min_crop_ratio=0.7),
])
|
[
"matsuda@leapmind.io"
] |
matsuda@leapmind.io
|
1c6b5467cecb01f1ce0b31daf841a086b27f5729
|
2305ce053d16652d31823bd07faf38553b4f9b63
|
/books/CrackingCodesWithPython/Chapter07/PracticeQuestions/Question2.py
|
8a2180532dbdb37385f07898cf49f7569908be5b
|
[
"MIT"
] |
permissive
|
leihuagh/python-tutorials
|
cff3c5e250a152252d4b725bca19f55721483249
|
33831b983d7bd1491e367b6c7654e687d5ba709b
|
refs/heads/master
| 2020-03-29T17:59:31.226400
| 2018-09-24T08:41:26
| 2018-09-24T08:41:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
# Is each spam a global or local variable?
spam = 42 # global/local
def foo():
global spam
spam = 99 # global/local
print(spam)
foo() # mind == blown
|
[
"jose@JoseALerma.com"
] |
jose@JoseALerma.com
|
ffce63d94730102d0599826ce2ea7e70963a22c9
|
0c9ec5d4bafca45505f77cbd3961f4aff5c10238
|
/openapi-python-client/openapi_client/models/process_instance_modification_dto.py
|
a2247513acf2154c0228b1c01bcedaba8beea8c1
|
[
"Apache-2.0"
] |
permissive
|
yanavasileva/camunda-bpm-examples
|
98cd2930f5c8df11a56bf04845a8ada5b3bb542d
|
051f8f28c62845e68ce4059ab64264c5a0bdc009
|
refs/heads/master
| 2022-10-19T20:07:21.278160
| 2020-05-27T15:28:27
| 2020-05-27T15:28:27
| 267,320,400
| 0
| 0
|
Apache-2.0
| 2020-05-27T14:35:22
| 2020-05-27T13:00:01
| null |
UTF-8
|
Python
| false
| false
| 7,337
|
py
|
# coding: utf-8
"""
Camunda BPM REST API
OpenApi Spec for Camunda BPM REST API. # noqa: E501
The version of the OpenAPI document: 7.13.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class ProcessInstanceModificationDto(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'skip_custom_listeners': 'bool',
'skip_io_mappings': 'bool',
'instructions': 'list[ProcessInstanceModificationInstructionDto]',
'annotation': 'str'
}
attribute_map = {
'skip_custom_listeners': 'skipCustomListeners',
'skip_io_mappings': 'skipIoMappings',
'instructions': 'instructions',
'annotation': 'annotation'
}
def __init__(self, skip_custom_listeners=None, skip_io_mappings=None, instructions=None, annotation=None, local_vars_configuration=None): # noqa: E501
"""ProcessInstanceModificationDto - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._skip_custom_listeners = None
self._skip_io_mappings = None
self._instructions = None
self._annotation = None
self.discriminator = None
self.skip_custom_listeners = skip_custom_listeners
self.skip_io_mappings = skip_io_mappings
if instructions is not None:
self.instructions = instructions
if annotation is not None:
self.annotation = annotation
    # Generated accessor pair for the 'skipCustomListeners' JSON field.
    @property
    def skip_custom_listeners(self):
        """Gets the skip_custom_listeners of this ProcessInstanceModificationDto. # noqa: E501
        Skip execution listener invocation for activities that are started or ended as part of this request. # noqa: E501
        :return: The skip_custom_listeners of this ProcessInstanceModificationDto. # noqa: E501
        :rtype: bool
        """
        return self._skip_custom_listeners
    @skip_custom_listeners.setter
    def skip_custom_listeners(self, skip_custom_listeners):
        """Sets the skip_custom_listeners of this ProcessInstanceModificationDto.
        Skip execution listener invocation for activities that are started or ended as part of this request. # noqa: E501
        :param skip_custom_listeners: The skip_custom_listeners of this ProcessInstanceModificationDto. # noqa: E501
        :type: bool
        """
        self._skip_custom_listeners = skip_custom_listeners
    # Generated accessor pair for the 'skipIoMappings' JSON field.
    @property
    def skip_io_mappings(self):
        """Gets the skip_io_mappings of this ProcessInstanceModificationDto. # noqa: E501
        Skip execution of [input/output variable mappings](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables/#input-output-variable-mapping) for activities that are started or ended as part of this request. # noqa: E501
        :return: The skip_io_mappings of this ProcessInstanceModificationDto. # noqa: E501
        :rtype: bool
        """
        return self._skip_io_mappings
    @skip_io_mappings.setter
    def skip_io_mappings(self, skip_io_mappings):
        """Sets the skip_io_mappings of this ProcessInstanceModificationDto.
        Skip execution of [input/output variable mappings](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables/#input-output-variable-mapping) for activities that are started or ended as part of this request. # noqa: E501
        :param skip_io_mappings: The skip_io_mappings of this ProcessInstanceModificationDto. # noqa: E501
        :type: bool
        """
        self._skip_io_mappings = skip_io_mappings
    # Generated accessor pair for the 'instructions' JSON field.
    @property
    def instructions(self):
        """Gets the instructions of this ProcessInstanceModificationDto. # noqa: E501
        JSON array of modification instructions. The instructions are executed in the order they are in. # noqa: E501
        :return: The instructions of this ProcessInstanceModificationDto. # noqa: E501
        :rtype: list[ProcessInstanceModificationInstructionDto]
        """
        return self._instructions
    @instructions.setter
    def instructions(self, instructions):
        """Sets the instructions of this ProcessInstanceModificationDto.
        JSON array of modification instructions. The instructions are executed in the order they are in. # noqa: E501
        :param instructions: The instructions of this ProcessInstanceModificationDto. # noqa: E501
        :type: list[ProcessInstanceModificationInstructionDto]
        """
        self._instructions = instructions
    # Generated accessor pair for the 'annotation' JSON field.
    @property
    def annotation(self):
        """Gets the annotation of this ProcessInstanceModificationDto. # noqa: E501
        An arbitrary text annotation set by a user for auditing reasons. # noqa: E501
        :return: The annotation of this ProcessInstanceModificationDto. # noqa: E501
        :rtype: str
        """
        return self._annotation
    @annotation.setter
    def annotation(self, annotation):
        """Sets the annotation of this ProcessInstanceModificationDto.
        An arbitrary text annotation set by a user for auditing reasons. # noqa: E501
        :param annotation: The annotation of this ProcessInstanceModificationDto. # noqa: E501
        :type: str
        """
        self._annotation = annotation
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProcessInstanceModificationDto):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ProcessInstanceModificationDto):
return True
return self.to_dict() != other.to_dict()
|
[
"noreply@github.com"
] |
yanavasileva.noreply@github.com
|
c0eb4bc8412ecaaaa4487ae33bba16b07ed34654
|
0b5ab7349485da4ea40ca343bc50f4cab74c917c
|
/week06/c11_02.py
|
6f2b70e99e3507ef95e9e833368280cb7b5ebef7
|
[] |
no_license
|
workherd/Python006-006
|
9bf2782ccda037de9af98eb7daa87fd1edeb3caf
|
7aa176c3cf4effd015802b550edfb70f859e94d9
|
refs/heads/main
| 2023-04-29T14:37:43.545376
| 2021-05-16T04:13:08
| 2021-05-16T04:13:08
| 323,247,475
| 1
| 0
| null | 2020-12-21T06:13:42
| 2020-12-21T06:13:42
| null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2021/1/30 21:06
# @Author : john
# @File : c11.py
# 使用type元类创建类
def pop_value(self, dict_value):
    """Remove the first key whose value equals ``dict_value`` (if any)."""
    _missing = object()
    doomed = next((key for key in self if self[key] == dict_value), _missing)
    if doomed is not _missing:
        self.pop(doomed)


# A metaclass must derive from ``type``.
class DelValue(type):
    # ``__new__`` injects the ``pop_value`` helper into every class it builds.
    def __new__(mcls, name, bases, attrs):
        attrs['pop_value'] = pop_value
        return super().__new__(mcls, name, bases, attrs)


class DelDictValue(dict, metaclass=DelValue):
    """A dict that can delete an entry by value via ``pop_value``."""


# Demo: build a dict and drop the entry holding 'C'.
d = DelDictValue()
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
d.pop_value('C')
for k, v in d.items():
    print(k, v)
|
[
"1330430077@qq.com"
] |
1330430077@qq.com
|
477f6768361db49f12efc9b40192ff00dd8077a5
|
1f2860bf84fa87e2d6c3b5e5b1a62e76879a642a
|
/q41_50/049.py
|
5ff4e3fbd4c56fc5ad1ac2c8c1db93e208b34745
|
[] |
no_license
|
Vegctrp/pic100knock
|
222dc4c981e7d20180a2338184109987f56d1518
|
134a41c9a3fcfc49667a26625cfeaf7bc4a91899
|
refs/heads/master
| 2020-07-08T17:17:43.869124
| 2019-09-23T15:23:08
| 2019-09-23T15:23:08
| 203,730,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
import cv2
import numpy as np
import lib4150
import matplotlib.pyplot as plt
import sys,os
sys.path.append(os.getcwd())
from q01_10 import lib0110
if __name__ == '__main__':
    # Pipeline: grayscale -> Otsu binarization -> morphological opening
    # (1 iteration), then display and save the result.
    img = cv2.imread("Gasyori100knock/Question_41_50/imori.jpg")
    img2 = lib0110.OTSU_binarization(lib0110.BGR2GRAY(img))
    out = lib4150.Opening_operation(img2, 1)
    cv2.imshow("imori", out)
    cv2.waitKey(0)  # block until a key is pressed before saving
    cv2.imwrite("q41_50/049.jpg", out)
    cv2.destroyAllWindows()
|
[
"kem.altair@gmail.com"
] |
kem.altair@gmail.com
|
2b381525fefdeac0ef1e5cd6af040849d7ab9e4e
|
99b0631baa2fd9ab2455d848b47febf581916272
|
/zhijieketang/chapter11/ch11.3.6.py
|
5ed6d9e5c1c618b89767775b7f3df937094f87bb
|
[] |
no_license
|
seceast/PyProjects
|
a934e366cb619f2610d75b9a0fb47d818814a4de
|
7be7193b4126ce920a3d3ffa4ef5d8743b3fa7d1
|
refs/heads/master
| 2023-03-07T22:23:21.229489
| 2021-02-25T05:37:58
| 2021-02-25T05:37:58
| 265,480,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
# coding=utf-8
# 代码文件:chapter11/ch11.3.6.py
class Animal(object):
    """A simple animal whose weight changes as it eats and runs."""

    def __init__(self, age, sex=1, weight=0.0):
        # Plain instance attributes: age, sex (default 1), weight in kg.
        self.age = age
        self.sex = sex
        self.weight = weight

    def eat(self):
        """Eating adds 0.05 to the weight."""
        self.weight += 0.05
        print('eat...')

    def run(self):
        """Running subtracts 0.01 from the weight."""
        self.weight -= 0.01
        print('run...')
# Demo: weight goes up by 0.05 per eat() and down by 0.01 per run().
a1 = Animal(2, 0, 10.0)
print('a1体重:{0:0.2f}'.format(a1.weight))
a1.eat()
print('a1体重:{0:0.2f}'.format(a1.weight))
a1.run()
print('a1体重:{0:0.2f}'.format(a1.weight))
|
[
"yangyadong25@163.com"
] |
yangyadong25@163.com
|
30d160a89cb021b89ffa3a81257339d90f223c24
|
b44df2be270793884ca5bd5e79d22c991edae001
|
/app/auth/views.py
|
9ac180988110a27bb0106dfd45b959fc130d2335
|
[] |
no_license
|
Wakarende/Blog
|
d45c423be8a86227ad4106bbdae429a79ef3dcf2
|
2e2d2f89c55c4f5dbe0cf6fe09f83212bf905f5c
|
refs/heads/master
| 2023-04-23T00:18:47.343723
| 2021-05-05T06:46:51
| 2021-05-05T06:46:51
| 363,127,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
from flask import render_template,redirect,url_for,flash,request
from . import auth
from flask_login import login_required,login_user,logout_user
from ..models import User
from .forms import RegistrationForm,LoginForm
from .. import db
from ..email import mail_message
@auth.route('/login', methods=['GET','POST'])
def login():
    """Render the login form and authenticate the user on POST."""
    login_form = LoginForm()
    if login_form.validate_on_submit():
        user = User.query.filter_by(email = login_form.email.data).first()
        if user is not None and user.verify_password(login_form.password.data):
            login_user(user,login_form.remember.data)
            # Honour the ?next= redirect target, falling back to the index.
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or Password')
    title = "Login"
    return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
    """Create a new user account, send a welcome email, redirect to login."""
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email = form.email.data, username = form.username.data,password = form.password.data)
        db.session.add(user)
        db.session.commit()
        mail_message("Welcome to My J Word","email/welcome_user",user.email,user=user)
        return redirect(url_for('auth.login'))
    # NOTE(review): ``title`` is assigned but never passed to the template
    # below -- either pass title=title to render_template or drop the line.
    title = "New Account"
    return render_template('auth/register.html',registration_form = form)
@auth.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the index page."""
    logout_user()
    return redirect(url_for("main.index"))
|
[
"joykirii@gmail.com"
] |
joykirii@gmail.com
|
71f210792e7bb560ed5a7e5db4e79a338bba1c63
|
ba15fac9fbe53578d3823ceeac67fc338e2312e9
|
/tests/functional/filters/e2e_filtering.py
|
2980783ed05dd42c1c343efb2ed74a65e966af43
|
[
"Apache-2.0"
] |
permissive
|
dblenkus/resolwe-bio-py
|
0694dcc32f485ccdd5ad43496ccef985c83819c7
|
b6610ef26625492f39fdeef846d3e5a89a0009b3
|
refs/heads/master
| 2021-01-21T03:33:39.831803
| 2020-05-25T14:29:46
| 2020-05-26T07:48:34
| 55,721,959
| 0
| 0
| null | 2016-04-07T19:26:12
| 2016-04-07T19:26:12
| null |
UTF-8
|
Python
| false
| false
| 3,627
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
import six
from ..base import FILES_PATH, BaseResdkFunctionalTest
class BaseResdkFilteringTest(BaseResdkFunctionalTest):
    """Shared helpers for the endpoint-filtering functional tests."""
    def setUp(self):
        super().setUp()
        # Subclasses override ``self.endpoint`` with the endpoint under test.
        self.endpoint = self.res.data
    def _get_ids(self, query):
        """Return id's of objects in query."""
        return [getattr(elm, "id") for elm in query]
    def _check_filter(self, query_args, expected):
        # Filter the endpoint with ``query_args`` and compare the returned
        # ids against ``expected``, ignoring ordering.
        response = self._get_ids(self.endpoint.filter(**query_args))
        expected = self._get_ids(expected)
        six.assertCountEqual(self, response, expected)
    @staticmethod
    def datetime_to_str(datetime):
        # Serialize to ISO-8601 with microseconds and numeric timezone.
        return datetime.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
class TestDataFilter(BaseResdkFilteringTest):
    """Filtering by id on the data endpoint, using two uploaded objects."""
    def setUp(self):
        super().setUp()
        self.endpoint = self.res.data
        # Upload the same small FASTA file twice to get two data objects.
        self.data1 = self.res.run(
            slug="upload-fasta-nucl",
            input={
                "src": os.path.join(FILES_PATH, "genome.fasta.gz"),
                "species": "Homo sapiens",
                "build": "hg38",
            },
            data_name="Data 1",
        )
        self.data2 = self.res.run(
            slug="upload-fasta-nucl",
            input={
                "src": os.path.join(FILES_PATH, "genome.fasta.gz"),
                "species": "Homo sapiens",
                "build": "hg38",
            },
            data_name="Data 2",
        )
    def tearDown(self):
        super().tearDown()
        # Remove the uploaded objects from the server again.
        self.data1.delete(force=True)
        self.data2.delete(force=True)
    def test_id(self):
        self._check_filter({"id": self.data1.id}, [self.data1])
        self._check_filter({"id": self.data2.id}, [self.data2])
        self._check_filter({"id__in": [self.data1.id]}, [self.data1])
        self._check_filter(
            {"id__in": [self.data1.id, self.data2.id]}, [self.data1, self.data2]
        )
class TestProcessFilter(BaseResdkFilteringTest):
    """Filtering by id on the process endpoint."""
    def setUp(self):
        super().setUp()
        self.endpoint = self.res.process
        # Two well-known aligner processes to filter against.
        self.star = self.res.process.get(slug="alignment-star")
        self.hisat2 = self.res.process.get(slug="alignment-hisat2")
    def test_id(self):
        self._check_filter({"id": self.star.id}, [self.star])
        self._check_filter({"id": self.hisat2.id}, [self.hisat2])
        self._check_filter({"id__in": [self.star.id]}, [self.star])
        self._check_filter(
            {"id__in": [self.star.id, self.hisat2.id]}, [self.star, self.hisat2]
        )
    def test_iterate_method(self):
        # Iterate all workflow-type processes in chunks of 10.
        workflows = list(
            self.res.process.filter(type="data:workflow").iterate(chunk_size=10)
        )
        # Use ``assertGreater`` to avoid updating this test each time
        # after new workflow is added / removed.
        self.assertGreater(len(workflows), 30)
class TestFeatureFilter(BaseResdkFilteringTest):
    """Filtering by feature_id on the knowledge-base feature endpoint."""
    def setUp(self):
        super().setUp()
        self.endpoint = self.res.feature
        self.ft1 = self.res.feature.get(
            source="ENSEMBL", feature_id="id_001", species="Homo sapiens",
        )
        self.ft2 = self.res.feature.get(
            source="ENSEMBL", feature_id="id_002", species="Mus musculus",
        )
    @unittest.skip("Turn on when one can prepare KnowledgeBase and ES index for it.")
    def test_id(self):
        self._check_filter({"feature_id": self.ft1.feature_id}, [self.ft1])
        self._check_filter(
            {"feature_id__in": [self.ft1.feature_id, self.ft2.feature_id]},
            [self.ft1, self.ft2],
        )
|
[
"zmrzlikar.jure@gmail.com"
] |
zmrzlikar.jure@gmail.com
|
9375ac379d9e628b7e3443a2979108c2d4b929d6
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/sieve/e7fe59df2209451392330ca4b6e3a767.py
|
92ce912956b69c6858c7563937f5b3852a638d3c
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
def sieve(lim):
    """Return the list of primes in [2, lim].

    The original implementation only ran on Python 2: it called ``.remove``
    on the result of ``range`` (a list there, an immutable range object on
    Python 3) and did O(n^2) element removals.  This version uses a proper
    Sieve of Eratosthenes, is Python 3 compatible, and returns the same
    result: all primes up to and including ``lim`` (empty for lim < 2).
    """
    if lim < 2:
        return []
    is_prime = [True] * (lim + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(lim ** 0.5) + 1):
        if is_prime[p]:
            # Start crossing out at p*p: smaller multiples of p were
            # already removed by smaller primes.
            for multiple in range(p * p, lim + 1, p):
                is_prime[multiple] = False
    return [n for n in range(2, lim + 1) if is_prime[n]]
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
394523d7bb019d8ca4af4992d2c5ef47409c3c40
|
88a02059cbf16303dad9745a774230b2e078410f
|
/1163.py
|
4ea1e789406c0eacec92cc075cfdac006f318799
|
[] |
no_license
|
nekoTheShadow/my_answers_of_yukicoder
|
2e3a94b6dab214fda1ae3c53f879190e6eedc31f
|
d607dee056a84f08305a51d6eb0bac9faa5d74ed
|
refs/heads/master
| 2022-01-16T06:31:48.776630
| 2022-01-04T08:11:27
| 2022-01-04T08:11:27
| 132,351,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
# Read n items and a threshold x; a[i] are the values, b[i] their costs.
n, x = map(int, input().split())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
for i in range(n):
    a[i] -= x  # work with values relative to x, so "total >= 0" is the goal
m = sum(a)
if m >= 0:
    # Already at or above the target: nothing has to be paid.
    print(0)
    exit()
if max(a) < 0:
    # Every relative value is negative, so no choice can reach a
    # non-negative total.
    print(-1)
    exit()
# dp[i][k] = minimal cost after processing the first i items, where k is
# the current total of relative values.  Paying b[i] shifts the total by
# -a[i] (i.e. removes item i's contribution from the sum).
# NOTE(review): the exact operation bought for b[i] is inferred from the
# recurrence -- confirm against the original problem statement.
dp = [{} for _ in range(n+1)]
dp[0][m] = 0
for i in range(n):
    for k in dp[i]:
        # Option 1: leave item i as is (cost unchanged).
        if k in dp[i+1]:
            dp[i+1][k] = min(dp[i+1][k], dp[i][k])
        else:
            dp[i+1][k] = dp[i][k]
        # Option 2: pay b[i] and shift the total by -a[i].
        if k-a[i] in dp[i+1]:
            dp[i+1][k-a[i]] = min(dp[i+1][k-a[i]], dp[i][k]+b[i])
        else:
            dp[i+1][k-a[i]] = dp[i][k]+b[i]
# Answer: cheapest way to end with a non-negative total.
ans = float('inf')
for k in dp[n]:
    if k >= 0:
        ans = min(ans, dp[n][k])
print(ans)
|
[
"h.nakamura0903@gmail.com"
] |
h.nakamura0903@gmail.com
|
0f0f5d0b1c85ad0e5682d38e9cb7c54b8cbd0dd4
|
6c29a72dfe9eed38f4667babf74c3ae98983be6f
|
/tests/test_fixtures.py
|
445ff550251b679e46e044775df9352aed857bb4
|
[
"Apache-2.0"
] |
permissive
|
ryan-rs/pytest-examples
|
0149f79a069a2fb272486d3bf25a756c341f38e8
|
9ca2368c5e86f651497e42bb304415cf2ae3a0df
|
refs/heads/master
| 2020-03-26T15:04:17.374972
| 2018-11-27T16:38:47
| 2018-11-27T16:38:47
| 145,020,977
| 0
| 0
|
NOASSERTION
| 2018-11-12T19:14:40
| 2018-08-16T17:43:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,540
|
py
|
# -*- coding: utf-8 -*-
# ======================================================================================================================
# Imports
# ======================================================================================================================
import os
import pytest
# ======================================================================================================================
# Fixtures
# ======================================================================================================================
@pytest.fixture
def prefix():
    """A message prefix."""
    return 'The start of the message.'
@pytest.fixture
def message():
    """The message"""
    return '\nThe message!\n'
@pytest.fixture
def suffix():
    """A message suffix."""
    return 'The end of the message.\n'
@pytest.fixture
def static_message_fixture(tmpdir_factory, prefix, message, suffix):
    """A fixture which provides a static message."""
    # Write prefix+message+suffix into a per-test temp file and hand the
    # path to the test; tmpdir_factory handles cleanup automatically.
    filename = tmpdir_factory.mktemp('data').join('static_message.txt').strpath
    file_contents = "{0}{1}{2}".format(prefix, message, suffix)
    with open(filename, 'w') as f:
        f.write(file_contents)
    return filename
@pytest.fixture
def static_message_with_setup_teardown_fixture(tmpdir_factory, prefix, message, suffix):
    """A fixture which provides a static message, but uses a custom setup/teardown."""
    # Setup
    # NOTE(review): a fixed /tmp path is shared across runs (and the
    # tmpdir_factory argument is unused here) -- fine as a yield-fixture
    # demo, but not safe under parallel test execution.
    filename = '/tmp/static_message.txt'
    file_contents = "{0}{1}{2}".format(prefix, message, suffix)
    with open(filename, 'w') as f:
        f.write(file_contents)
    # Deliver
    yield filename
    # Teardown
    os.remove(filename)
@pytest.fixture
def dyanmic_message_fixture_factory(tmpdir_factory, prefix, suffix):
    """A fixture which provides a dynamic message."""
    # Factory fixture: the test supplies the middle part of the message.
    filename = tmpdir_factory.mktemp('data').join('dynamic_message.txt').strpath
    def _factory(message):
        file_contents = "{0}{1}{2}".format(prefix, message, suffix)
        with open(filename, 'w') as f:
            f.write(file_contents)
        return filename
    return _factory
# ======================================================================================================================
# Test Cases
# ======================================================================================================================
@pytest.mark.test_id('747ba3e0-aafb-11e8-bfa2-0025227c8120')
@pytest.mark.jira('ASC-891')
def test_static_message(static_message_fixture, prefix, message, suffix):
    """Verify that the file contains the correct message."""
    with open(static_message_fixture, 'r') as f:
        assert f.read() == "{0}{1}{2}".format(prefix, message, suffix)
@pytest.mark.test_id('747b9fc6-aafb-11e8-bfa2-0025227c8120')
@pytest.mark.jira('ASC-891')
def test_static_message_with_setup_teardown(static_message_with_setup_teardown_fixture, prefix, message, suffix):
    """Verify that the file contains the correct message."""
    with open(static_message_with_setup_teardown_fixture, 'r') as f:
        assert f.read() == "{0}{1}{2}".format(prefix, message, suffix)
@pytest.mark.test_id('747b9b84-aafb-11e8-bfa2-0025227c8120')
@pytest.mark.jira('ASC-891')
def test_dynamic_message(dyanmic_message_fixture_factory, prefix, suffix):
    """Verify that the file contains the correct message."""
    # NOTE(review): 'dyanmic_...' is a typo for 'dynamic_...'; renaming
    # would have to touch the fixture definition as well.
    custom_message = 'Wow! Much Custom!'
    with open(dyanmic_message_fixture_factory(custom_message), 'r') as f:
        assert f.read() == "{0}{1}{2}".format(prefix, custom_message, suffix)
|
[
"ryan.gard@rackspace.com"
] |
ryan.gard@rackspace.com
|
485f78bf0f29fec745e66e6f68080ca1aaf408bf
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02574/s928939056.py
|
075c9216b7de983528e8012b9dc8198741dc64ee
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
from math import gcd
from functools import reduce
k = 10**6 + 1  # exclusive upper bound for the values in ``a``


def judge(n, a):
    """Classify the positive integers in ``a`` (each < k) as coprime.

    Returns 'pairwise coprime' when no integer >= 2 divides two of the
    numbers, 'setwise coprime' when some pair shares a divisor but the
    GCD of the whole list is 1, and 'not coprime' otherwise.  ``n``
    (= len(a)) is kept for interface compatibility but is not used.
    """
    c = [0] * k
    for x in a:
        c[x] += 1  # histogram of the input values
    # Divisors larger than max(a) cannot divide anything in ``a``; the
    # original scanned all the way to k (10**6) regardless of the input,
    # which is pure waste for small inputs.  Results are identical.
    limit = max(a)
    # Some divisor i >= 2 divides two or more inputs -> not pairwise coprime.
    t = any(sum(c[i::i]) > 1 for i in range(2, limit + 1))
    # A common divisor of the whole list bumps the verdict to 'not coprime'.
    t += reduce(gcd, a) > 1
    return ['pairwise', 'setwise', 'not'][t] + ' coprime'
# Read the problem input from stdin and print the verdict.
n=int(input())
a=list(map(int,input().split()))
print(judge(n,a))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1619841e22d47095341a3633936eb0d746446e6c
|
1ff31cedc4794083e213e6637deaacab49cfdd9a
|
/pyatv/mrp/tlv8.py
|
c8675b1741300017851aee0f599941efce39e7b5
|
[
"MIT"
] |
permissive
|
dschu012/pyatv
|
910cefec45fcfe94fe9b3fee59672299215db24b
|
6496548aee09ff95f5515abb172c1ba19b9d995b
|
refs/heads/master
| 2020-12-04T06:35:25.957921
| 2020-01-01T14:38:54
| 2020-01-01T18:35:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
"""Implementation of TLV8 used by MRP/HomeKit pairing process.
Note that this implementation only supports one level of value, i.e. no dicts
in dicts.
"""
# Some of the defined tags used by the pairing process
# (tags are stored as decimal strings, matching the keys that read_tlv
# produces and write_tlv consumes).
TLV_METHOD = '0'
TLV_IDENTIFIER = '1'
TLV_SALT = '2'
TLV_PUBLIC_KEY = '3'
TLV_PROOF = '4'
TLV_ENCRYPTED_DATA = '5'
TLV_SEQ_NO = '6'
TLV_ERROR = '7'
TLV_BACK_OFF = '8'
TLV_SIGNATURE = '10'
def read_tlv(data):
    """Decode TLV8 bytes into a dict mapping tag (as str) -> value bytes.

    A value larger than 255 bytes is transmitted as several consecutive
    chunks carrying the same tag; those chunks are concatenated here.
    """
    result = {}
    position = 0
    total = len(data)
    while position < total:
        tag = str(data[position])
        length = data[position + 1]
        value = data[position + 2:position + 2 + length]
        if tag in result:
            result[tag] += value  # value > 255 was split into chunks
        else:
            result[tag] = value
        position += 2 + length
    return result
def write_tlv(data):
    """Encode a dict of tag (str) -> bytes into TLV8 bytes.

    Values longer than 255 bytes are emitted as multiple chunks with the
    same tag; ``read_tlv`` concatenates them back together when decoding.
    """
    pieces = []
    for key, value in data.items():
        tag = bytes([int(key)])
        remaining = value
        while remaining:
            chunk = remaining[:255]
            remaining = remaining[255:]
            pieces.append(tag + bytes([len(chunk)]) + chunk)
    return b''.join(pieces)
|
[
"pierre.staahl@gmail.com"
] |
pierre.staahl@gmail.com
|
c37fddbe72a4bf5e7895fef5d2695c5dec44a3c9
|
f3c2fa4c6ef32e01b98ac56a2e25419152d69208
|
/gpio-utils/radiosimulator.py
|
2c6810bd11fb90a6e076c1bbac1c5bd5e6bd0e97
|
[
"MIT"
] |
permissive
|
deets/brombeerquark
|
bd4687d42f9466cd5f6843df6a49e647cf3e2fcc
|
9314bc6adaf19ee3868612c8aafdce0f1ebbabb9
|
refs/heads/master
| 2021-07-19T07:02:22.427227
| 2021-02-28T12:45:57
| 2021-02-28T12:45:57
| 47,883,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,399
|
py
|
from __future__ import print_function
from functools import partial
import time
import threading
import Queue
from tfa import TimedFiniteAutomaton
def simulate_gpio_events(queue):
    """Feed a scripted sequence of button events into ``queue``.

    Emulates a user operating the volume+/volume- GPIO buttons so the
    radio's state machine can be exercised without real hardware.
    """
    time.sleep(1.0)
    # just increase volume by pressing/releasing once
    queue.put("volume+pressed")
    queue.put("volume+released")
    time.sleep(3.0)
    # now just hold volume+pressed til 11!
    queue.put("volume+pressed")
    time.sleep(7.0)
    queue.put("volume+released")
    # now just hold volume-pressed til we are back to 1
    queue.put("volume-pressed")
    time.sleep(7.0)
    queue.put("volume-released")
    # finally, toggle play/pause
    queue.put("volume-pressed")
    time.sleep(0.1)
    queue.put("volume+pressed")
    # let go of both buttons
    # Fixed: this event was spelled "volume-release" (missing the trailing
    # 'd'), which does not match any transition the automaton defines for
    # "volume-released".
    queue.put("volume-released")
    queue.put("volume+released")
class Radio(object):
    """Two-button radio driven by a timed finite automaton.

    A short press of volume+/- nudges the volume once, holding a button
    ramps the volume every .5s, and pressing both buttons within
    SAME_TIME_THRESHOLD seconds toggles play/pause.
    """
    MINVOL, MAXVOL = 1, 11
    # Seconds within which the *other* button must be pressed for the two
    # presses to count as simultaneous (=> toggle play/pause).
    SAME_TIME_THRESHOLD = .3
    def __init__(self):
        self._volume = self.MINVOL
        self.playing = True
        automat = TimedFiniteAutomaton("idle")
        automat.add_state("volume_up")
        automat.add_state("volume_down")
        automat.add_state("nudge_up")
        automat.add_state("nudge_down")
        automat.add_state("volume_up_or_toggle")
        automat.add_state("volume_down_or_toggle")
        automat.add_state("toggle_play_pause")
        # waiting for either volume change or toggling play/pause
        automat.add_transition("idle", "volume_up_or_toggle", "volume+pressed")
        automat.add_transition("idle", "volume_down_or_toggle", "volume-pressed")
        # after self.SAME_TIME_THRESHOLD seconds, we will transition to volue up/down
        # we will re-enter the state on .5 timer events to further increase volume
        automat.add_transition("volume_up_or_toggle", "volume_up", self.SAME_TIME_THRESHOLD)
        automat.add_transition("volume_down_or_toggle", "volume_down", self.SAME_TIME_THRESHOLD)
        automat.add_transition("volume_up", "volume_up", .5)
        automat.add_transition("volume_down", "volume_down", .5)
        automat.add_transition("volume_up", "idle", "volume+released")
        automat.add_transition("volume_down", "idle", "volume-released")
        # when we wait for toggle_play_pause, but already release,
        # just nudge the volume once in the respective direction!
        automat.add_transition("volume_up_or_toggle", "nudge_up", "volume+released")
        automat.add_transition("nudge_up", "idle")
        automat.add_transition("volume_down_or_toggle", "nudge_down", "volume-released")
        automat.add_transition("nudge_down", "idle")
        # if within this timeframe the opposite key was pressed, toggle!
        automat.add_transition("volume_up_or_toggle", "toggle_play_pause", "volume-pressed")
        automat.add_transition("volume_down_or_toggle", "toggle_play_pause", "volume+pressed")
        # from play_pause, transition automatically back to idle
        automat.add_transition("toggle_play_pause", "idle")
        self._automat = automat
        self._automat.add_state_change_listener(self._react_to_state_changes)
        print(automat.dot())
    def _react_to_state_changes(self, _from, to, _on):
        # Adjust the radio's state whenever the automaton enters a state
        # that carries an action.
        if to in ("volume_up", "nudge_up"):
            self.volume += 1
        elif to in ("volume_down", "nudge_down"):
            self.volume -= 1
        elif to == "toggle_play_pause":
            self.playing = not self.playing
    @property
    def volume(self):
        # Current volume; the setter below clamps it to [MINVOL, MAXVOL].
        return self._volume
    @volume.setter
    def volume(self, value):
        self._volume = min(max(value, self.MINVOL), self.MAXVOL)
    def run(self):
        """Run the event loop forever.

        Events come from a daemon thread running simulate_gpio_events;
        the .1s poll timeout doubles as the automaton's clock tick.
        """
        q = Queue.Queue()
        t = threading.Thread(target=partial(simulate_gpio_events, q))
        t.daemon = True
        t.start()
        self._automat.add_state_change_listener(self._print_status)
        while True:
            try:
                event = q.get(block=True, timeout=.1)
            except Queue.Empty: #timeout
                self._automat.tick()
            else:
                print("feed", event)
                self._automat.feed(event)
    def _print_status(self, *_a):
        # State-change listener: dump the current play state and volume.
        print("Playing: {}, Volume: {}, State: {} ".format(
            self.playing,
            self.volume,
            self._automat.state,
        )
        )
def main():
    # Build the radio (this prints the automaton's dot graph) and run the
    # simulated event loop forever.
    radio = Radio()
    radio.run()
if __name__ == '__main__':
    main()
|
[
"deets@web.de"
] |
deets@web.de
|
342f10467123051dda12ff9cfcfa59cb2048ea18
|
7bdc1a3565ba8964658a749fb05ddc27f20d0a33
|
/scripts/mvn_incremental
|
d92faf0f5cebe8ccda4cf17fbd1335426c57b1b4
|
[] |
no_license
|
wuan/ci-tools
|
375cd64d0197e7e482255661c998f11e06c3e404
|
6796ee0a9f0b11a4c4ac7c05bdad047e6edd3313
|
refs/heads/master
| 2020-12-24T16:24:02.820016
| 2016-03-04T07:33:19
| 2016-03-04T07:33:19
| 39,160,905
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,229
|
#!/usr/bin/env python
# coding=utf-8
"""
Copyright 2015 Andreas Würl
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from optparse import OptionParser
import os
import sys
from citools.maven import Pom
if __name__ == '__main__':
    # Parse the Maven project descriptor in the current directory.
    pom = Pom(os.path.join(os.getcwd(), "pom.xml"))
    modules = set(pom.modules)
    # NOTE(review): ``modules`` is computed but never used below.
    parser = OptionParser()
    (options, args) = parser.parse_args()
    if len(args) == 1:
        target = args[0]
        # NOTE(review): ``Persistence`` and ``TestSuite`` are not imported
        # anywhere in this file, so this branch raises NameError at runtime.
        # Presumably they come from citools / junit_xml -- confirm and add
        # the missing imports.
        persistence = Persistence(target + '.db')
        report = persistence.report
        if report is not None:
            with open('junit.xml', 'w') as junit_result_file:
                TestSuite.to_file(junit_result_file, report.test_suites, False, "latin1")
|
[
"andi@tryb.de"
] |
andi@tryb.de
|
|
03d53a2fa862909aa754424596e338d8baa4977a
|
03cbc74c3b5c3147e2a5ccfe668594350ac32e09
|
/lib/game_controller.py
|
9ce9f53324d0447b7aa41f4d4860de35d7426544
|
[] |
no_license
|
yapo/scoreboard
|
51e1b5ae07ad23390b920c2a8cad13f4e9e71a4f
|
f35457125e377d19d912509b3b7c7749fc5a15aa
|
refs/heads/master
| 2021-01-18T12:05:53.859445
| 2014-08-30T21:33:58
| 2014-08-30T21:33:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,895
|
py
|
import sys
class game_controller(object):
    """Tracks the score of a two-player (white/black) game and fires events.

    Handlers can be registered for 'victory', 'danger_zone', 'first_blood'
    and 'combo_breaker'; each handler is called as handler(winner, loser).

    Fixed: the original used Python 2 print statements, which are syntax
    errors on Python 3; the function-call form used here is valid on both.
    """
    def __init__(self, config):
        # ``config`` must expose ``max_goals`` (goals needed to win).
        self.config = config
        self.player1 = player('white')
        self.player2 = player('black')
        self.last_scored_player = None
        self.victory_callbacks = []
        self.danger_zone_callbacks = []
        self.combo_breaker_callbacks = []
        self.first_blood_callbacks = []
    def reset(self):
        """Reset both players' counters for a new game."""
        self.player1.reset()
        self.player2.reset()
    def score(self, player_label):
        """Register a goal for 'white' or 'black' and fire resulting events."""
        # identify the players
        player = self.player1 if player_label == 'white' else self.player2
        other_player = self.player2 if player == self.player1 else self.player1
        # A combo breaker occurs when the opponent had a streak of 3+ goals.
        is_combo_breaker = other_player.combo_counter > 2
        if player is not self.last_scored_player:
            player.combo_breaker()
            other_player.combo_breaker()
            self.last_scored_player = player
        # score
        player.score()
        self.player1.show_score()
        self.player2.show_score()
        # raise game events (only the first matching event fires)
        if player.goal_counter == self.config.max_goals:
            self.victory(player)
        elif player.goal_counter == 1 and other_player.goal_counter == 0:
            self.execute_callbacks(self.first_blood_callbacks)
        elif player.goal_counter == self.config.max_goals - 1:
            self.execute_callbacks(self.danger_zone_callbacks)
        elif is_combo_breaker:
            self.execute_callbacks(self.combo_breaker_callbacks)
    def add_handler(self, event_name, handler = None):
        """Register ``handler`` for ``event_name``; return its index.

        Raises Exception for an unknown event name.
        """
        callbacks = { 'victory': self.victory_callbacks,
                      'danger_zone': self.danger_zone_callbacks,
                      'first_blood': self.first_blood_callbacks,
                      'combo_breaker': self.combo_breaker_callbacks
                    }
        if event_name in callbacks:
            callbacks[event_name].append(handler)
            return len(callbacks[event_name]) - 1
        else:
            raise Exception('non valid event name: {}'.format(event_name))
    def execute_callbacks(self, callbacks):
        """Invoke every non-None callback with (winner, loser)."""
        winner = self.get_winner()
        loser = self.player1 if not self.player1 == winner else self.player2
        for callback in callbacks:
            if callback is not None:
                callback(winner, loser)
    def victory(self, player):
        """Mark ``player`` as winner and fire the victory callbacks."""
        print("victory ... player {} wins".format(player.label))
        player.winner = True
        self.execute_callbacks(self.victory_callbacks)
    def get_winner(self):
        """Return the currently leading player (player1 wins ties)."""
        return self.player1 if self.player1.goal_counter >= self.player2.goal_counter else self.player2
    def get_scored_player(self):
        """Return the player who scored most recently (None before any goal)."""
        return self.last_scored_player
    def get_other_player(self, player):
        """Return the opponent of ``player``."""
        return self.player1 if player is not self.player1 else self.player2
class player(object):
    """Score-keeping state for a single player."""
    def __init__(self, label):
        self.label = label
        self.goal_counter = 0
        self.combo_counter = 0
        self.winner = False
    def reset(self):
        """Reset the goal and combo counters (keeps label/winner)."""
        print("{}: reset".format(self.label))
        self.goal_counter = 0
        self.combo_counter = 0
    def score(self):
        """Record one goal and extend the current combo streak."""
        self.goal_counter += 1
        self.combo_counter += 1
    def show_score(self):
        print("{}: score - {}: combos {}".format(self.label, self.goal_counter, self.combo_counter))
    def combo_breaker(self):
        """End the current combo streak."""
        self.combo_counter = 0
|
[
"root@raspberrypi.(none)"
] |
root@raspberrypi.(none)
|
dff6b6a5b2d3975ef21308b798a270290acf6b65
|
006341ca12525aa0979d6101600e78c4bd9532ab
|
/CMS/Zope-3.2.1/Dependencies/twisted-Zope-3.2.1/twisted/vfs/pathutils.py
|
a5eb45e442945312b73e304ec7158ad550551c8a
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"ZPL-2.0"
] |
permissive
|
germanfriday/code-examples-sandbox
|
d0f29e20a3eed1f8430d06441ac2d33bac5e4253
|
4c538584703754c956ca66392fdcecf0a0ca2314
|
refs/heads/main
| 2023-05-30T22:21:57.918503
| 2021-06-15T15:06:47
| 2021-06-15T15:06:47
| 377,200,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,390
|
py
|
from zope.interface import Interface, Attribute, implements
def getAbsoluteSegments(path, cwd='/'):
    """
    @param path: either a string or a list of string segments
    which specifys the desired path. may be relative to the cwd
    @param cwd: optional string specifying the current working directory

    returns a list of string segments which most succinctly
    describe how to get to path from root
    """
    segments = path if isinstance(path, list) else path.split("/")
    if segments and segments[0] == "":
        # Absolute path: drop the empty segment before the leading '/'.
        segments = segments[1:]
    else:
        # Relative path: resolve against the working directory.
        segments = cwd.split("/") + segments
    stack = []
    for segment in segments:
        if segment == "..":
            # Step up one level; dropping the last element of an empty
            # list is a no-op, so '..' at the root stays at the root.
            stack = stack[:-1]
        elif segment not in ("", "."):
            stack.append(segment)
    return stack
def fetch(root, path, cwd='/'):
    """
    @param root: IFileSystemContainer which represents the root node
    of the filesystem
    @param path: either a string or a list of string segments
    which specifys the desired path. may be relative to the cwd
    @param cwd: optional string specifying the current working directory

    returns node described by path relative to the cwd
    """
    node = root
    for segment in getAbsoluteSegments(path, cwd):
        node = node.child(segment)
    return node
def basename(path, cwd='/'):
    # Last segment of the resolved path.
    # NOTE(review): raises IndexError for the root path ('/'), since the
    # resolved segment list is then empty -- confirm callers never pass it.
    return getAbsoluteSegments(path, cwd)[-1]
def dirname(path, cwd='/'):
    # Absolute path of the containing directory ('/' for top-level paths).
    return "/" + "/".join(getAbsoluteSegments(path, cwd)[:-1])
def getRoot(node):
    """Follow parent links upward until the self-parented root is reached."""
    current = node
    while current.parent is not current:
        current = current.parent
    return current


def getSegments(node):
    """Return the path segments from the root down to ``node``.

    The root itself yields an empty list.
    """
    segments = []
    current = node
    while current.parent is not current:
        segments.append(current.name)
        current = current.parent
    segments.reverse()
    return segments
class IFileSystem(Interface):
    """Interface for a rooted virtual filesystem with a working directory."""
    root = Attribute("root IFileSystemNode of the IFileSystem")
    pathToCWD = Attribute("path to current working directory")
    def absPath(path):
        """
        returns a normalized absolutized version of the pathname path
        """
    def splitPath(path):
        """
        returns a normalized absolutized version of the pathname path
        split on the filesystem's directory seperator
        """
    def joinPath(tail, head):
        """
        joins the two paths, tail and head
        """
    def dirname(path):
        """
        returns the directory name of the container for path
        """
    def basename(path):
        """
        returns the base name of pathname path
        """
    def fetch(path):
        """
        returns a node object representing the file with pathname path
        """
    def _getImplicitChildren(dir):
        """
        returns implicit children for a given dir
        this is placed in the filesystem so that the same
        directory can have different implicit children depending
        on what sort of filesystem it has been placed in
        - may not be the best idea ...
        returns a list of 2 element tuples:
        [ ( path, nodeObject ) ]
        eg.
        [ ( ".", dir ), ( "..", dir.parent ) ]
        """
class FileSystem:
"""
Wraps unix-like VFS backends, in which directory separator is '/',
root's path is '/', and all directories have '.' and '..'.
Effectively, this is just a convenience wrapper around the other
functions in this module which remembers the root node and the
current working directory.
"""
implements(IFileSystem)
def __init__(self, root, pathToCWD="/"):
self.root = root
self.root.filesystem = self
self.pathToCWD = pathToCWD
def absPath(self, path):
return "/" + "/".join(self.splitPath(path))
def splitPath(self, path):
return getAbsoluteSegments(path, self.pathToCWD)
def joinPath(self, tail, head):
if tail == "/":
return tail + head
else:
return tail + "/" + head
def dirname(self, path):
return dirname(path, self.pathToCWD)
def basename(self, path):
return basename(path, self.pathToCWD)
def fetch(self, pathToFile="."):
return fetch(self.root, pathToFile, self.pathToCWD)
def _getImplicitChildren(self, dir):
return [(".", dir), ("..", dir.parent)]
|
[
"chris@thegermanfriday.com"
] |
chris@thegermanfriday.com
|
2e37a656e0edae2639a95df75b96978d92948395
|
6c547e3312e2d1bd3dab123b831053ed7aef7b6d
|
/pages/MYCL/gain_loss/realized.py
|
cdacebeed29d6225098e87c9886c149079ecb7fb
|
[] |
no_license
|
kenito2050/BICL
|
8c4239f1e897e4dfc04aa35e827816242b41d5dd
|
82891aba56cc49c9cf96ce82472847c4cb10828f
|
refs/heads/master
| 2020-12-31T22:10:44.784193
| 2020-02-10T23:00:10
| 2020-02-10T23:00:10
| 239,039,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
from selenium.webdriver.common.by import By
from config_globals import *
class realized():
def __init__(self, driver):
self.driver = driver
def Page_Elements(self):
# Table Header
self.table_header = self.driver.find_element(By.XPATH, "/html/body/div[1]/div[3]/div/div/ui-view/div/div[2]/div[3]/div/table")
# Table
self.table = self.driver.find_element(By.XPATH, "/html/body/div[1]/div[3]/div/div/ui-view/div/div[2]/div[3]/div/table/tbody")
return self
# Actions
def verify_Total_Displays(self, test_case_ID, browser, env, time_stamp):
columns = self.driver.find_elements(By.XPATH,
"/html/body/div[1]/div[3]/div/div/ui-view/div/div[2]/div[5]/div/table/tbody")
text_displays = False
for item in columns:
text = columns[0].text
if ("Total" in text):
text_displays = True
break
break
try:
assert text_displays is True
except AssertionError:
screenshot_name = "FAIL" + "_" + test_case_ID + "_" + browser + "_" + env + "_" + time_stamp + ".png"
saved_screenshot_location = str(screenshot_directory / screenshot_name)
self.driver.get_screenshot_as_file(saved_screenshot_location)
raise
|
[
"ken.villarruel@gmail.com"
] |
ken.villarruel@gmail.com
|
7088ef175232c41f87a46face5ef3c3f34a5927d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02256/s464205146.py
|
66f0c6b76ec4f68636fea32269d78178a8e85ded
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
a, b = map(int, input().split())
c = []
if a > b:
a, b = b, a
if b%a == 0:
print(a)
else:
while True:
for i in range(a):
x = i + 2
#print(a, x)
if a%x == 0:
if b%x == 0:
c.append(x)
a = a//x
b = b//x
#print(c)
break
elif b%(a//x) == 0:
c.append(a//x)
a = x
b = b//(a//x)
#print(c)
break
#if x%1000 == 0:
#print(x)
if x > a**0.5:
break
if x > a**0.5:
break
s = 1
for j in c:
s = s * j
print(s)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e5889701fc9377bce8eda18387d6adaea7a0c042
|
50bd113a98b1657d735a933c0fcc446dd8c35e3e
|
/companies/migrations/0011_auto_20190807_0332.py
|
59e7718a743d90f137978161d15d48a3cf61152f
|
[
"Apache-2.0"
] |
permissive
|
fuseumass/hackerforce
|
ab0d7e2fcb0c25276eac977fd628a0c67411e059
|
dfb6ac1304a7db21853765de9da795e8e9ef20bf
|
refs/heads/development
| 2022-12-23T09:44:38.319260
| 2019-12-05T00:11:59
| 2019-12-28T06:22:01
| 194,482,639
| 13
| 7
|
Apache-2.0
| 2022-12-08T03:16:40
| 2019-06-30T06:21:44
|
CSS
|
UTF-8
|
Python
| false
| false
| 575
|
py
|
# Generated by Django 2.2.3 on 2019-08-07 07:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('companies', '0010_auto_20190807_0322'),
]
operations = [
migrations.AlterField(
model_name='industry',
name='color',
field=models.CharField(choices=[('blue', 'blue'), ('green', 'green'), ('purple', 'purple'), ('orange', 'orange'), ('yellow', 'yellow'), ('red', 'red'), ('brown', 'brown'), ('pink', 'pink'), ('gray', 'gray')], max_length=10),
),
]
|
[
"j@wogloms.net"
] |
j@wogloms.net
|
88065cf2fd2349c82ab8c1843bf968f43a975af1
|
98be00ee32971cade82d10c067aff532c3394a62
|
/geeksforgeeks/linked_list_merge_sort.py
|
200e6d2c43b2057dfa6cccd4818f7680ebebc6f6
|
[] |
no_license
|
vigneshhari/Competitive_solutions
|
5ab34933ea8d84eab67bdef9bb9e4562f6b90782
|
7a35e1386e5cff71cb5746b6797ccc0f03ceb3f4
|
refs/heads/master
| 2023-01-11T02:53:01.456863
| 2022-12-29T13:50:03
| 2022-12-29T13:50:03
| 115,146,700
| 4
| 2
| null | 2019-10-26T09:15:03
| 2017-12-22T20:03:51
|
Python
|
UTF-8
|
Python
| false
| false
| 391
|
py
|
class ll:
next = None
def __init__(self,val):
self.val = val
def setnext(self,next):
self.next = next
def stringll(node):
if(node == None):return ""
return str(node.val) + " " + stringll(node.next)
head = ll(-1)
looper = head
for i in range(input()):
temp = ll(input())
looper.setnext(temp)
looper = looper.next
print stringll(head)
|
[
"vichuhari100@gmail.com"
] |
vichuhari100@gmail.com
|
ebaa610d6c8eb57c44c39600b076754a616c11b1
|
2ce46e0e82fca0c69228e56e38f33bbb198f9f04
|
/homework/homework_pageobject_pytest_siye/PageObjects/tender_page.py
|
536bf0d6128bbae18aad74d9189795e558e4f593
|
[] |
no_license
|
seesun1/LemonClass
|
157ad65e0818bd76e4edafcc7e0889b89109d961
|
39fc70d467079ed8e9a229f03858160bf8d37926
|
refs/heads/master
| 2021-09-15T08:04:00.517038
| 2017-12-25T14:26:30
| 2017-12-25T14:26:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,775
|
py
|
# -*- coding: UTF-8 -*-
#!/usr/bin/env python
# @Time : 2017/12/4 0:01
# @Author : SiYe
# @PROJECT_NAME : LemonClass
# @File : project_page.py
# @Software: PyCharm
#-------------------------------------------------------------------------------
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time
from homework.homework_pageobject_pytest_siye.Common.elements_locator import *
class TenderPage:
def __init__(self,driver):
self.driver = driver
#正常投标操作
def bid_normal(self,bid_name,amount):
WebDriverWait(self.driver, 60, 1).until(EC.presence_of_element_located((By.XPATH, bidding_num_locator %bid_name))) # 判断元素加载完成
self.driver.find_element_by_xpath(bidding_num_locator %bid_name).send_keys(amount)
self.driver.find_element_by_xpath(bidding_bidButton_locator %bid_name).click()
tender_date = time.strftime('%Y-%m-%d')
tender_time = time.strftime('%H:%M')
return tender_date, tender_time
#勾选全投进行投标操作
def bid_allBid(self,bid_name):
WebDriverWait(self.driver, 60, 1).until(EC.presence_of_element_located((By.XPATH, bidding_num_locator %bid_name))) # 判断元素加载完成
self.driver.find_element_by_xpath(bidding_allBid_locator %bid_name).click()
self.driver.find_element_by_xpath(bidding_bidButton_locator %bid_name).click()
#获取投标成功信息
def get_success_info(self):
WebDriverWait(self.driver, 60, 1).until(EC.visibility_of_element_located((By.XPATH, bidding_success_info_locator))) # 判断投标成功界面显示文字完成
return self.driver.find_element_by_xpath(bidding_success_info_locator).text
#获取投标失败信息
def get_error_info(self):
WebDriverWait(self.driver, 60, 1).until(EC.visibility_of_element_located((By.XPATH, bidding_error_info_locator ))) # 判断投标成功界面显示文字完成
return self.driver.find_element_by_xpath(bidding_error_info_locator ).text
#获取完整标名
def get_bid_name(self,bid_name):
WebDriverWait(self.driver, 60, 1).until(EC.visibility_of_element_located((By.XPATH, bidding_bidName_locator %bid_name))) # 判断投标失败提示显示文字完成
return self.driver.find_element_by_xpath(bidding_bidName_locator %bid_name).text
#关闭投标成功提示操作
def close_success_info(self):
WebDriverWait(self.driver, 60, 1).until(EC.presence_of_element_located((By.XPATH, bidding_success_close_locator))) # 判断元素加载完成
self.driver.find_element_by_xpath(bidding_success_close_locator).click()
|
[
"1076168822@qq.com"
] |
1076168822@qq.com
|
87ece04732cb2acc1a56a4842377caf258f57fdf
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adverbs/_securely.py
|
354e1a50d4dc2383607cb4a3d91f2d7302d29de2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
#calss header
class _SECURELY():
def __init__(self,):
self.name = "SECURELY"
self.definitions = [u'in a way that avoids someone or something being harmed by any risk, danger, or threat: ', u'positioned or fastened firmly and correctly and therefore not likely to move, fall, or break: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adverbs'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
9137b4b1bc0d434921790dbb3b1ea233760b6e87
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/4_set/6300. 最小公共值-集合交集.py
|
aa23a024dc580e11d35f90f6d3c465c7f728a010
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 864
|
py
|
from typing import List
# 给你两个整数数组 nums1 和 nums2 ,它们已经按非降序排序,
# 请你返回两个数组的 最小公共整数 。
# 如果两个数组 nums1 和 nums2 没有公共整数,请你返回 -1 。
class Solution:
def getCommon(self, nums1: List[int], nums2: List[int]) -> int:
"""数组已经排序"""
i, j = 0, 0
while i < len(nums1) and j < len(nums2):
if nums1[i] == nums2[j]:
return nums1[i]
elif nums1[i] < nums2[j]:
i += 1
else:
j += 1
return -1
def getCommon2(self, nums1: List[int], nums2: List[int]) -> int:
"""数组未排序"""
s1, s2 = set(nums1), set(nums2)
res = s1 & s2
if res:
return min(res)
return -1
|
[
"lmt2818088@gmail.com"
] |
lmt2818088@gmail.com
|
b41b6d17177d2576edcafb7c2692ea634c2d003b
|
c97fc7658c39feb51c0ed42c04783797c8675b8a
|
/2018/pcy1/day28上--算法/算法6_重复范围.py
|
e72fd01cd5d72606f8d44868a30549fcced23ef7
|
[] |
no_license
|
githubvit/study
|
8bff13b18bea4954e8ed1b4619a091b134b8ff97
|
845e19d1225f1aa51c828b15effac30be42fdc1b
|
refs/heads/master
| 2023-02-20T15:59:19.635611
| 2021-12-15T08:30:54
| 2021-12-15T08:30:54
| 241,928,274
| 1
| 1
| null | 2023-02-02T06:18:48
| 2020-02-20T16:08:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
#_*_coding:utf-8_*_
'''
经典语句:程序就是算法加数据结构。
算法(Algorithm):一个计算机的计算过程,用来解决问题的方法
'''
'''
给定一个升序列表和一个整数,返回该整数在列表中的下标范围。
例如:列表[1,2,3,3,3,4,4,5],若查找3,则返回(2,4);若查找1,则返回(0,0)。
思路:
因为是有序,所以可以用二分查找找到3,
找到3后,定义左右两个指针,左指针左移,看是不是3,是就继续左移,直到不是就退出。
右指针同理,最后返回左右两指针,就得到了范围。
注意,指针不能越界
'''
def bin_serach_sum(olist,v):
#初始化指针
low=0
high=len(olist)-1
mid=(low+high)/2
while low<=high:#每一轮low、high、mid都在变化,就是不断的折半
#这里不能用while True,否则如果没有陷入死循环
if v==olist[mid]:
left= mid
right=mid
while left>=0 and olist[left]==v:
#注意:这里定义范围不能用left>=low,right<=high
# 初始的low和high已经变化了
left-=1
while right<=len(olist)-1 and olist[right]==v:
right+=1
return (left+1,right-1)
elif v>olist[mid]:#在右边,把low指针左移
low=mid+1
mid=(low+high)/2
continue
else:#在左边,把high指针右移
high=mid-1
mid = (low + high) / 2
continue
return #这里用return表示没找到,返回‘None'
li=[1,2,3,3,3,4,4,5]
print bin_serach_sum(li,1)
|
[
"sgq523@163.com"
] |
sgq523@163.com
|
d8b39a1c516d2f5c17f94296c319054c25bd24e2
|
871690900c8da2456ca2818565b5e8c34818658e
|
/boj/silver/14501.py
|
3000b1c08b7c01ae991fcf0d109895fc4640310b
|
[] |
no_license
|
kobeomseok95/codingTest
|
40d692132e6aeeee32ee53ea5d4b7af8f2b2a5b2
|
d628d72d9d0c1aef2b3fa63bfa9a1b50d47aaf29
|
refs/heads/master
| 2023-04-16T09:48:14.916659
| 2021-05-01T11:35:42
| 2021-05-01T11:35:42
| 311,012,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
from sys import stdin
READ = lambda : stdin.readline().strip()
n = int(READ())
t, p = [], []
dp = [0] * (n + 1)
for _ in range(n):
ti, pi = map(int, READ().split())
t.append(ti)
p.append(pi)
max_val = 0
for i in range(n-1, -1, -1):
time = t[i] + i
if time <= n:
dp[i] = max(p[i] + dp[time], max_val)
max_val = dp[i]
else:
dp[i] = max_val
print(max_val)
|
[
"37062337+kobeomseok95@users.noreply.github.com"
] |
37062337+kobeomseok95@users.noreply.github.com
|
8cd065138546caa19d8dfb80cad98bd36cc575af
|
1f4505ed66f4fd68c6d1edf18ecff58362742fad
|
/algorithm/TwoPointer/18_4Sum.py
|
97632d69454427fc6ac5b7ebff3128538fed3770
|
[
"MIT"
] |
permissive
|
nishitpatel01/Data-Science-Toolbox
|
0d9b63a365698cc4a423abd5881cde8f6bf672be
|
80dc1310d103c9481feff8792426c550ddcc0a36
|
refs/heads/master
| 2020-05-19T08:26:40.319321
| 2019-05-04T05:58:48
| 2019-05-04T05:58:48
| 184,921,541
| 1
| 1
|
MIT
| 2019-05-04T16:53:21
| 2019-05-04T16:53:20
| null |
UTF-8
|
Python
| false
| false
| 2,264
|
py
|
class Solution:
# combination of two fixed number, reducing the rest of the problem to 2-sum
def fourSumPart1(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
nums.sort()
print(nums)
N = 4
ans = []
def combo(nums, N, ans, path):
if N == 2:
ans.append(path)
return
for i in range(len(nums) - N + 1):
if i > 0 and nums[i] == nums[i - 1]:
continue
combo(nums[i + 1:], N - 1, ans, path + [nums[i]])
combo(nums, N, ans, [])
print(ans)
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
nums.sort()
print(nums)
N = 4
ans = []
def combo(nums, N, ans, path, target):
if N == 2:
# solve two-sums
l, r = 0, len(nums) - 1
while l < r:
if nums[l] + nums[r] == target:
ans.append(path + [nums[l], nums[r]])
l += 1
r -= 1
while l < r and nums[l] == nums[l - 1]:
l += 1
while l < r and nums[r] == nums[r + 1]:
r -= 1
elif nums[l] + nums[r] < target:
l += 1
else:
r -= 1
else:
for i in range(len(nums) - N + 1):
# take advantages of sorted list (optional
if target < nums[i] * N or target > nums[-1] * N:
break
# avoid duplicate trees
if i > 0 and nums[i] == nums[i - 1]:
continue
# because array is sorted, only need to pick remaining element from [i+1:]
combo(nums[i + 1:], N - 1, ans, path + [nums[i]], target - nums[i])
combo(nums, N, ans, [], target)
return ans
solver = Solution()
solver.fourSum([1, 0, -1, 0, -2, 2], 0)
|
[
"shawlu@github.com"
] |
shawlu@github.com
|
1b86c9cfdddd0628d6c56e3cdd522b0b23889b15
|
62c10b93d7c80df17b6a38f53300d697dea04b00
|
/cryptoparty/mixins.py
|
340e284535616cc7a9f6b33b5f4eea158cae7b93
|
[] |
no_license
|
cryptopartydk/cryptoparty.dk
|
bf2512330b2776c70c71b677df2a6ee36cba48d8
|
49d5fa04ed253c771d5c72f395a9819fec4d8fe6
|
refs/heads/master
| 2021-01-17T07:05:31.314722
| 2016-05-15T10:48:18
| 2016-05-15T10:48:18
| 28,929,476
| 1
| 1
| null | 2016-04-22T09:26:20
| 2015-01-07T19:14:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 284
|
py
|
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
class LoginRequiredMixin:
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
|
[
"valberg@orn.li"
] |
valberg@orn.li
|
f14980ac0bf82ecaf233ca9e8cad6c3dbb13e8b6
|
f8b9e5de8823ff810ec445b6fa6d0e34f7b6319f
|
/Django/PracticeExam2_project/apps/Dashboard_app/models.py
|
27d48bb144c743729cd67c2f88ef043234207b43
|
[] |
no_license
|
amalfushi/Python
|
6c042443a8aeae15fc96a41a692abdbea05db863
|
067c2cef722457e884833f77baf9f44f45a4a165
|
refs/heads/master
| 2021-01-24T04:08:21.278071
| 2018-02-26T06:25:59
| 2018-02-26T06:25:59
| 122,923,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from ..LoginReg3_app.models import User
# Create your models here.
class CommentManager(models.Manager):
def createComment(self, postData):
tempComment = self.create(
comment_text = postData['newComment'],
recipient = User.objects.get(id=postData['recipient']),
sender = User.objects.get(id=postData['userID']),
)
def validateComment(self, postData):
results ={'status': True, 'errors': []}
if len(postData['newComment']) < 2:
results['status'] = False
results['errors'].append('Comments must be at least 2 characters')
class Comment(models.Model):
comment_text = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
recipient = models.ForeignKey(User, related_name='recieved_comments')
sender = models.ForeignKey(User, related_name='sent_comments')
objects = CommentManager()
|
[
"dustin.p.schroeder@gmail.com"
] |
dustin.p.schroeder@gmail.com
|
9a2147565b1ba65de24b5cc99f64ed5f4e1fa0ee
|
e6871ff61d28e5e2acf201b459da29c20a8d3dca
|
/setup.py
|
79685d9e4a85ba9dad9f06f7069a611467159ac2
|
[
"Apache-2.0"
] |
permissive
|
csams/squerly
|
9d2f38b23cae4034308806f1ebdcc99a0208d868
|
62e416dfc8b418d1cc361b59f401accc8c111b52
|
refs/heads/master
| 2022-11-29T09:32:48.019336
| 2020-08-12T22:17:06
| 2020-08-12T22:17:06
| 261,189,120
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,690
|
py
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
runtime = set([
"pyyaml",
])
develop = set([
"coverage",
"flake8",
"pytest",
"pytest-cov",
"setuptools",
"twine",
"wheel",
])
docs = set([
"Sphinx",
"sphinx_rtd_theme",
])
optional = set([
"pandas",
"IPython"
])
if __name__ == "__main__":
with open(os.path.join(here, "README.md")) as f:
long_description = f.read()
setup(
name="squerly",
version="0.2.2",
description="Squerly takes the tedium out of nested dicts and lists.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/csams/squerly",
author="Christopher Sams",
author_email="csams@gmail.com",
packages=find_packages(),
install_requires=list(runtime),
package_data={"": ["LICENSE"]},
license="Apache 2.0",
extras_require={
"develop": list(develop | docs | optional),
"docs": list(docs),
"optional": list(optional),
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8"
],
include_package_data=True
)
|
[
"csams@redhat.com"
] |
csams@redhat.com
|
c79b0645329cf6d9eb5b20d1430672e0d0dc2f1d
|
e51742f3b8026d6ae44761445689ac47f1f2e495
|
/plot_two_file_groups.py
|
57eed9c40f8b30805a5ecb90effe559dc0679f8d
|
[] |
no_license
|
igridchyn/neuro-analysis
|
a2979692ea6a2c99dc0991bc371125f0ec6edb5d
|
c0b1bb6abc9b34328aa515ce4a9098f3cbc9f05a
|
refs/heads/main
| 2023-06-26T09:48:46.250806
| 2021-07-29T09:34:53
| 2021-07-29T09:34:53
| 390,671,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,447
|
py
|
#!/usr/bin/env python
# plot data from two groups of files (obtained by find through given
from matplotlib import pyplot as plt
from sys import argv
import subprocess
import numpy as np
from math import sqrt
import os
from scipy.stats import ttest_ind
if len(argv) < 2:
print '(1)<wildcard 1> (2)<wildcard 2>'
wc1 = argv[1]
wc2 = argv[2]
corlist = len(argv) > 3
if corlist:
print 'WARNING: correlation list mode ON'
cmd = 'find . -name "' + wc1 + '"'
sp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
fl1 = sp.communicate()[0].split()
cmd = 'find . -name "' + wc2 + '"'
sp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
fl2 = sp.communicate()[0].split()
if len(fl1) != len(fl2):
print 'Different sizes of file lists'
v1 = []
v2 = []
# bad days
bd = []
for i in range(len(fl1)):
if '1124' in fl1[i] or '0226' in fl1[i]:
bd.append(i)
#print 'WARNING: 1124/0226 excluded'
#continue
apath = os.path.dirname(fl1[i]) + '/../about.txt'
for line in open(apath):
ws = line.split(' ')
if ws[0] == 'swap':
print i, fl1[i], fl2[i], line
swap = bool(int(ws[1]))
if swap:
if not corlist:
v1.extend([float(a) for a in open(fl1[i])])
v2.extend([float(a) for a in open(fl2[i])])
else:
v1.extend([[float(l.split(' ')[0]), float(l.split(' ')[1])] for l in open(fl1[i])])
v2.extend([[float(l.split(' ')[0]), float(l.split(' ')[1])] for l in open(fl2[i])])
else:
if not corlist:
v2.extend([float(a) for a in open(fl1[i])])
v1.extend([float(a) for a in open(fl2[i])])
else:
v2.extend([[float(l.split(' ')[0]), float(l.split(' ')[1])] for l in open(fl1[i])])
v1.extend([[float(l.split(' ')[0]), float(l.split(' ')[1])] for l in open(fl2[i])])
mn = min(min(v1), min(v2))
mx = max(max(v1), max(v2))
plt.figure()
# plt.scatter(v1, v2)
# plt.plot([0, 1], [0, 1])
#if corlist:
# v1 = [np.corrcoef([v[0] for v in v1], [v[1] for v in v1])[0, 1]]
# v2 = [np.corrcoef([v[0] for v in v2], [v[1] for v in v2])[0, 1]]
print 'WARNING: RATE'
v1 = [(v[0] - v[1]) * 2/ (v[0] + v[1]) for v in v1]
v2 = [(v[0] - v[1]) * 2/ (v[0] + v[1]) for v in v2]
#plt.scatter(range(len(v1)), [(v1[i] - v2[i]) / v2[i] for i in range(len(v1))])
#plt.scatter(bd, [0] * len(bd), color='r')
bw = 0.2
v1 = np.array(v1)
v2 = np.array(v2)
v1 = v1[~np.isnan(v1) & ~np.isinf(v1) & (v1 != 0)]
v2 = v2[~np.isnan(v2) & ~np.isinf(v2) & (v2 != 0)]
#v1 = np.log(v1)
#v2 = np.log(v2)
m1 = -np.mean(v1)
m2 = -np.mean(v2)
#plt.bar([0, 1], [m1, m2], width = bw)
plt.bar([0], [m1], width = bw)
plt.bar([0.5], [m2], width = bw, color = 'Red')
plt.legend(['Target', 'Control'], fontsize = 20, loc='best')
plt.errorbar([0 + bw/2, 0.5 + bw/2], [m1, m2], yerr = [np.std(v1) / sqrt(v1.shape[0]), np.std(v2) / sqrt(v2.shape[0])], color = 'black', linewidth = 5, linestyle='None')
plt.xlim(0-bw, 0.7 + bw)
plt.gca().get_xaxis().set_visible(False)
plt.plot([-0.5, 1.0], [0, 0], color = 'black', linewidth = 3)
#plt.text(0.3, 0.12, '***', fontsize = 30)
plt.show()
print v1
print v2
if True:
#for d in reversed(bd):
# del v1[d]
# del v2[d]
# print 'WARNING: Exclude ', d
v1 = np.array(v1)
v2 = np.array(v2)
ind = ~np.isnan(v1) & ~np.isnan(v2) #& (v1 > 0) & (v2 > 0)
print ind
v1 = v1[ind]
v2 = v2[ind]
print v1
print v2
print np.mean(v1), np.std(v1) / sqrt(len(v1))
print np.mean(v2), np.std(v2) / sqrt(len(v2))
print 'T-test p-value: ', ttest_ind(v1, v2)[1]
|
[
"igor.gridchin@gmail.com"
] |
igor.gridchin@gmail.com
|
f8337cceff2742fc8e0488c9ed9833cc0cb05521
|
a36eb4685fd050c8e1ecb4a333470724bd76df60
|
/Leetcode/Jul20/050720/AntPlank/test.py
|
c4254221c68a019d0bd254ce37bb1ae5ea5e8bcb
|
[] |
no_license
|
phibzy/Contests
|
c9cff976909234cfafc51db9d9dde01c26123168
|
24aac4c81f34916945be03ed0b7c916dae4dbbb4
|
refs/heads/master
| 2023-01-20T06:23:06.837937
| 2020-11-30T06:54:58
| 2020-11-30T06:54:58
| 265,750,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
#!/usr/bin/python3
"""
Test Cases:
"""
import unittest
from antPlank import Solution
class testPlank(unittest.TestCase):
a = Solution()
def testAllRight(self):
self.assertEqual(self.a.getLastMoment(7, [], [0,1,2,3,4,5,6,7]), 7)
def testAllLeft(self):
self.assertEqual(self.a.getLastMoment(7, [0,1,2,3,4,5,6,7], []), 7)
def testExampleCase(self):
self.assertEqual(self.a.getLastMoment(4, [0,1], [3,4]), 4)
|
[
"phibzy@gmail.com"
] |
phibzy@gmail.com
|
fbb4ce7292b75c77504d3408177443a56cbf6a6f
|
7c4878b4881d79dd4daa3291e9c498e0706a7603
|
/lesson15/index/task1.py
|
7ec8ad71cd52424fb426eb3959d9f4db69b6d38c
|
[
"MIT"
] |
permissive
|
zainllw0w/skillbox
|
4cbdbb44762439c1aa1793a07683d7620500ddd7
|
896287b6f7f5612cf589094131fd1a12b0b192ba
|
refs/heads/main
| 2023-04-27T16:07:16.613359
| 2021-05-20T14:12:11
| 2021-05-20T14:12:11
| 329,755,030
| 0
| 0
| null | 2021-05-20T14:06:42
| 2021-01-14T23:01:15
|
Python
|
UTF-8
|
Python
| false
| false
| 471
|
py
|
nums_list = []
N = int(input('Кол-во чисел в списке: '))
for _ in range(N):
num = int(input('Очередное число: '))
nums_list.append(num)
maximum = nums_list[1]
minimum = nums_list[1]
for i in nums_list:
if maximum < i:
maximum = i
if minimum > i:
minimum = i
print('Максимальное число в списке:', maximum)
print('Минимальное число в списке:', minimum)
|
[
"77465388+zainllw0w@users.noreply.github.com"
] |
77465388+zainllw0w@users.noreply.github.com
|
459f40f5198fa6e892d8a8fa7a96f60e4f6eff43
|
a707312fc63fdf0b1b719d8d593695de082cad7f
|
/betterRest_jira_example/__init__.py
|
875da41c9e3be43e1c5860b292e19e04e70a3fc2
|
[] |
no_license
|
fdev31/bugrest
|
e75f7c1f387ead4636f56994bfbdbb5afca24368
|
2713712b8ae2b2d9b3b5a8b43c8c5e302da6271a
|
refs/heads/master
| 2023-02-08T19:48:55.702820
| 2023-01-27T19:02:51
| 2023-01-27T19:02:51
| 83,884,018
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
__all__ = ["commands"]
class CFG:
@classmethod
def asDict(kls):
return {}
jira_plugin = {}
def init(env, opts):
# must be loaded after the jira plugin !!
jira_plugin["mod"] = env["plugins"]["jira"]
jira_plugin["cfg"] = env["plugins"]["jira"].CFG
def get_jira_object():
return jira_plugin["mod"].get_jira_object()
def get_cfg():
return jira_plugin["cfg"]
def cmd_update(handler, *bugids):
"update: [bug id]+ execute custom UPDATE code (edit source) on all JIRA bugs or only the specified ones"
cfg = get_cfg()
if not bugids:
bugids = [b for b in (bug[cfg.id_name] for bug in handler) if b]
print("Upading %d bugs" % len(bugids))
input("Are you sure? (^C to cancel) ")
jira = get_jira_object()
for bugid in bugids:
bugid = bugid.rstrip("_,")
if bugid.startswith(cfg.id_name):
bugid = bugid[len(cfg.id_name) + 1 :]
print(bugid)
jbug = jira.issue(bugid)
# Update logic here:
commands = {k: v for k, v in globals().items() if k.startswith("cmd_")}
|
[
"fdev31@gmail.com"
] |
fdev31@gmail.com
|
88181d49242a84e85aa17f9e8c5e203a41cbf39f
|
2ce0c37ac7d9beeac23db688f97a1f502b92d13a
|
/products/migrations/0001_initial.py
|
8c3c7cb80e517b9df5e5a887ea84aba26a843734
|
[] |
no_license
|
AmrElsayedEG/inventory-system
|
0cdb0634b33117b13bfcae8642f979448d831369
|
d4bc483612c3b721918d75f24ab0d7fa29b78ce3
|
refs/heads/main
| 2023-08-20T22:32:25.113740
| 2021-10-04T08:55:44
| 2021-10-04T08:55:44
| 413,344,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,428
|
py
|
# Generated by Django 3.2 on 2021-09-02 12:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('merchants', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
'db_table': 'category',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('Category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.category')),
('supplier', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='merchants.supplier')),
],
options={
'verbose_name': 'product',
'verbose_name_plural': 'products',
'db_table': 'product',
},
),
]
|
[
"elsayed.amr50@gmail.com"
] |
elsayed.amr50@gmail.com
|
5e76891c6fd61b4b76cbc83b35b90119affe864e
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_188/ch43_2019_04_16_13_18_38_414865.py
|
9d7a9dc016757aa22b66c02a79763c38cff18db9
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
def sequencia_collatz(n):
contador = -1
while n!= 1:
if n % 2 == 0:
n /= 2
else:
n = (3*n) + 1
contador += 1
return contador
numero = 1000
maior = 0
while numero > 0:
teste = sequencia_collatz(numero)
if teste > maior:
maior = teste
resultado = numero
numero -= 1
print(resultado)
|
[
"you@example.com"
] |
you@example.com
|
eeaca050f85a8469d941da81394dbfc600b85fc2
|
1b42b04a27c33bfa704f4f108c9986cd558d7545
|
/external_libs/pyzmq-14.5.0/python3/ucs4/32bit/zmq/tests/test_pair.py
|
48473f08b608acc7577818b210bc9c2b80ae0831
|
[] |
no_license
|
Nandan307/trex_stl_lib
|
addbe9e42cc69a84fba59b385b108f4f22165534
|
8b3a204308475bff79a6bb7dd1419bbf18c10ffd
|
refs/heads/master
| 2021-01-25T00:33:17.552192
| 2018-02-28T14:41:56
| 2018-02-28T14:41:56
| 123,301,115
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import zmq
from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest
x = b' '
class TestPair(BaseZMQTestCase):
def test_basic(self):
s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
msg1 = b'message1'
msg2 = self.ping_pong(s1, s2, msg1)
self.assertEqual(msg1, msg2)
def test_multiple(self):
s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
for i in range(10):
msg = i*x
s1.send(msg)
for i in range(10):
msg = i*x
s2.send(msg)
for i in range(10):
msg = s1.recv()
self.assertEqual(msg, i*x)
for i in range(10):
msg = s2.recv()
self.assertEqual(msg, i*x)
def test_json(self):
s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
o = dict(a=10,b=list(range(10)))
o2 = self.ping_pong_json(s1, s2, o)
def test_pyobj(self):
s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
o = dict(a=10,b=list(range(10)))
o2 = self.ping_pong_pyobj(s1, s2, o)
if have_gevent:
class TestReqRepGreen(GreenTest, TestPair):
pass
|
[
"nakulkar@nakulkar.bos.csb"
] |
nakulkar@nakulkar.bos.csb
|
5072ea5d23fdff3d70a3bb7c00566d5eafeb2462
|
d552a3c92155d82ad146cd99ea9b8b4a3b65eab7
|
/openstack/tests/unit/accelerator/v2/test_device.py
|
22b17b336ec7b59cf89ffb03febaf3e54fa4d5c5
|
[
"Apache-2.0"
] |
permissive
|
jlyheden/openstacksdk
|
600201d4fbf23fd8a4fa9a53b398b29811446051
|
7e0dcaaa4a69b17b97e746ce8de104689c60becc
|
refs/heads/master
| 2022-11-30T19:15:16.113961
| 2020-06-07T18:02:22
| 2020-06-07T18:02:23
| 270,694,856
| 0
| 0
|
Apache-2.0
| 2020-06-08T14:15:36
| 2020-06-08T14:15:35
| null |
UTF-8
|
Python
| false
| false
| 2,029
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.tests.unit import base
from openstack.accelerator.v2 import device
EXAMPLE = {
'id': '1',
'uuid': uuid.uuid4(),
'created_at': '2019-08-09T12:14:57.233772',
'updated_at': '2019-08-09T12:15:57.233772',
'type': 'test_type',
'vendor': '0x8086',
'model': 'test_model',
'std_board_info': '{"product_id": "0x09c4"}',
'vendor_board_info': 'test_vb_info',
}
class TestDevice(base.TestCase):
def test_basic(self):
sot = device.Device()
self.assertEqual('device', sot.resource_key)
self.assertEqual('devices', sot.resources_key)
self.assertEqual('/devices', sot.base_path)
self.assertFalse(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertFalse(sot.allow_commit)
self.assertFalse(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = device.Device(**EXAMPLE)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertEqual(EXAMPLE['uuid'], sot.uuid)
self.assertEqual(EXAMPLE['type'], sot.type)
self.assertEqual(EXAMPLE['vendor'], sot.vendor)
self.assertEqual(EXAMPLE['model'], sot.model)
self.assertEqual(EXAMPLE['std_board_info'], sot.std_board_info)
self.assertEqual(EXAMPLE['vendor_board_info'], sot.vendor_board_info)
self.assertEqual(EXAMPLE['created_at'], sot.created_at)
self.assertEqual(EXAMPLE['updated_at'], sot.updated_at)
|
[
"mordred@inaugust.com"
] |
mordred@inaugust.com
|
3165e50dd296edb0a5d28b024e1c4e2ee1969e43
|
7a2d2cfbe99a13920e55e462bd40627e34d18f23
|
/openbb_terminal/cryptocurrency/overview/sdk_helpers.py
|
ccec6062d9340aec34117974ad16ac948ae80aba
|
[
"MIT"
] |
permissive
|
conrad-strughold/GamestonkTerminal
|
b9ada627929dbc1be379f19c69b34e24764efcff
|
c9aa674d979a7c7fd7f251410ceaa1c8a4ef2e6e
|
refs/heads/main
| 2023-06-24T02:59:45.096493
| 2023-05-16T15:15:20
| 2023-05-16T15:15:20
| 342,313,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,042
|
py
|
"""Crypto OV SDK Helper Functions."""
__docformat__ = "numpy"
import pandas as pd
from openbb_terminal.cryptocurrency.dataframe_helpers import (
lambda_long_number_format_with_type_check,
)
from openbb_terminal.cryptocurrency.overview import coinpaprika_model, pycoingecko_model
def globe(source: str = "CoinGecko") -> pd.DataFrame:
"""Get global crypto market data.
Parameters
----------
source : str, optional
Source of data, by default "CoinGecko"
Returns
-------
pd.DataFrame
DataFrame with global crypto market data
Examples
--------
>>> from openbb_terminal.sdk import openbb
>>> global_market_data = openbb.crypto.ov.globe()
To get data from CoinPaprika, use the source parameter:
>>> global_market_data = openbb.crypto.ov.globe(source="coinpaprika")
"""
if source.lower() == "coingecko":
df = pycoingecko_model.get_global_info()
return df
if source.lower() == "coinpaprika":
df = coinpaprika_model.get_global_info()
return df
return pd.DataFrame()
def exchanges(source: str = "CoinGecko") -> pd.DataFrame:
"""Show top crypto exchanges.
Parameters
----------
source : str, optional
Source to get exchanges, by default "CoinGecko"
Returns
-------
pd.DataFrame
DataFrame with top crypto exchanges
Examples
--------
>>> from openbb_terminal.sdk import openbb
>>> exchanges = openbb.crypto.ov.exchanges()
"""
if source.lower() == "coingecko":
df = pycoingecko_model.get_exchanges().sort_values(by="Rank", ascending=True)
return df
if source.lower() == "coinpaprika":
df = coinpaprika_model.get_list_of_exchanges("USD")
cols = [col for col in df.columns if col != "Rank"]
df[cols] = df[cols].applymap(
lambda x: lambda_long_number_format_with_type_check(x)
)
return df.sort_values(by="Rank", ascending=True).reset_index(drop=True).head(20)
return pd.DataFrame()
|
[
"noreply@github.com"
] |
conrad-strughold.noreply@github.com
|
4fd5872538497ddefc32a9afbb191cc29aaefc25
|
9cbacb28ae219c3b16e0ba0e5fc9e14a7f36e4c6
|
/paper-code/tensorflow_src/models/task/task_chatter.py
|
3c8bc4ac778984da4cb57f12b2ce7f7af574b9f4
|
[
"Apache-2.0"
] |
permissive
|
hquzhuguofeng/nlp-paper-1
|
7be27a30d304d789d9f6b45bef0803ff90e31aa2
|
e710574c95b671e848f4e3b6ad1d275a4adbf1fb
|
refs/heads/master
| 2023-08-27T07:47:54.575198
| 2021-10-22T12:28:22
| 2021-10-22T12:28:22
| 420,844,157
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,215
|
py
|
import os
import sys
import time
import tensorflow as tf
import model.model as task
from common.kb import load_kb
sys.path.append(sys.path[0][:-10])
from model.chatter import Chatter
import common.data_utils as _data
from common.common import CmdParser
import config.get_config as _config
from common.pre_treat import preprocess_raw_task_data
class TaskChatter(Chatter):
"""
Task模型的聊天器
"""
def __init__(self, checkpoint_dir, beam_size):
super().__init__(checkpoint_dir, beam_size)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
self.checkpoint_prefix = os.path.join(self.checkpoint_dir, "ckpt")
self.optimizer = tf.keras.optimizers.RMSprop()
self.train_loss = tf.keras.metrics.Mean(name='train_loss')
def _init_loss_accuracy(self):
print('待完善')
def _train_step(self, inp, tar, step_loss):
print('待完善')
def _create_predictions(self, inputs, dec_input, t):
print('待完善')
def train(self, dict_fn, data_fn, start_sign, end_sign, max_train_data_size):
_, _, lang_tokenizer = _data.load_dataset(dict_fn=dict_fn, data_fn=data_fn, start_sign=start_sign,
end_sign=end_sign, max_train_data_size=max_train_data_size)
data_load = _data.load_data(_config.dialogues_train, _config.max_length, _config.database, _config.ontology,
lang_tokenizer.word_index, _config.max_train_data_size, _config.kb_indicator_len)
model = task.task(_config.units, data_load.onto,
_config.vocab_size, _config.embedding_dim, _config.max_length)
checkpoint = tf.train.Checkpoint(model=model, optimizer=self.optimizer)
ckpt = tf.io.gfile.listdir(self.checkpoint_dir)
if ckpt:
checkpoint.restore(tf.train.latest_checkpoint(self.checkpoint_dir)).expect_partial()
sample_sum = len(data_load)
for epoch in range(_config.epochs):
print('Epoch {}/{}'.format(epoch + 1, _config.epochs))
start_time = time.time()
batch_sum = 0
while (True):
_, _, _, usr_utts, _, state_gt, kb_indicator, _ = data_load.next()
if data_load.cur == 0:
break
kb_indicator = tf.convert_to_tensor(kb_indicator)
with tf.GradientTape() as tape:
state_preds = model(inputs=[usr_utts, kb_indicator])
loss = 0
for key in state_preds:
loss += tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True)(state_gt[key], state_preds[key])
gradients = tape.gradient(loss, model.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, model.trainable_variables))
self.train_loss(loss)
kb = load_kb(_config.database, "name")
batch_sum = batch_sum + len(usr_utts)
print('\r', '{}/{} [==================================]'.format(batch_sum, sample_sum), end='',
flush=True)
step_time = (time.time() - start_time)
sys.stdout.write(' - {:.4f}s/step - loss: {:.4f}\n'
.format(step_time, self.train_loss.result()))
sys.stdout.flush()
checkpoint.save(file_prefix=self.checkpoint_prefix)
print('训练结束')
def main():
parser = CmdParser(version='%task chatbot V1.0')
parser.add_option("-t", "--type", action="store", type="string",
dest="type", default="pre_treat",
help="execute type, pre_treat/train/chat")
(options, args) = parser.parse_args()
chatter = TaskChatter(checkpoint_dir=_config.task_train_data, beam_size=_config.beam_size)
if options.type == 'train':
chatter.train(dict_fn=_config.dict_fn,
data_fn=_config.dialogues_tokenized,
start_sign='<sos>',
end_sign='<eos>',
max_train_data_size=0)
elif options.type == 'chat':
print('Agent: 你好!结束聊天请输入ESC。')
while True:
req = input('User: ')
if req == 'ESC':
print('Agent: 再见!')
exit(0)
# response = chatter.respond(req)
response = '待完善'
print('Agent: ', response)
elif options.type == 'pre_treat':
preprocess_raw_task_data(raw_data=_config.dialogues_train,
tokenized_data=_config.dialogues_tokenized,
semi_dict=_config.semi_dict,
database=_config.database,
ontology=_config.ontology)
else:
parser.error(msg='')
if __name__ == "__main__":
"""
TaskModel入口:指令需要附带运行参数
cmd:python task_chatter.py -t/--type [执行模式]
执行类别:pre_treat/train/chat
chat模式下运行时,输入exit即退出对话
"""
main()
|
[
"1210212670@qq.com"
] |
1210212670@qq.com
|
f1905277edc4ede3808ba32c2600771cc11cb93f
|
210691d0d62955bb1cb45e4221718418545c37f8
|
/t/test_wc.py
|
12001d56bf64f7578b338f0d5b81beb4c5fabe16
|
[] |
no_license
|
szabgab/pytools
|
d0445e1fc612592beb5706cb7af617ed2fb56df8
|
c0cd51768a1a728b55c7d78fbba35c2aeda53d7c
|
refs/heads/master
| 2021-01-18T18:25:15.914492
| 2012-11-15T07:51:26
| 2012-11-15T07:51:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
import unittest
#import sys
#sys.path.append('bin')
#import wc
class TestWC(unittest.TestCase):
def setUp(self):
pass
# create temporary file
def test_wc(self):
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
[
"gabor@szabgab.com"
] |
gabor@szabgab.com
|
23b4f421a0a3dfcf6dd0e936f4b47bf527924735
|
cf74841051a2ea1d11a6765fa76d64a1caad9293
|
/mysite/settings.py
|
7e6e00aa76b52ef0affa6a33b9c1eab72b1cfd8e
|
[] |
no_license
|
kattosai/my-first-blog
|
6e8e39e5300632c66418c45265eb34aa4263f9e3
|
b18c6149711b4ab512d2686be6576bf37820e810
|
refs/heads/master
| 2020-05-05T11:12:00.849690
| 2019-04-08T13:27:33
| 2019-04-08T13:27:33
| 179,979,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,183
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$(zf!850m6&rge1m@rs6h1p82z5a+=2u=u99vhr3#3jtky81!8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1','.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
|
[
"you@example.com"
] |
you@example.com
|
8fef98c2654ba9af7518648c1307f2dfd67c0ac1
|
8246092010e656920e7199f889f9cbf54b83a729
|
/pycoin/symbols/ric.py
|
0cb5179b08fc317c81336784f54f585757c5fe15
|
[
"MIT"
] |
permissive
|
richardkiss/pycoin
|
5717411a11445773ac922c1d1c1b7dbe4835cd77
|
b41ad7d02e52d9869a8c9f0dbd7d3b2b496c98c0
|
refs/heads/main
| 2023-08-07T12:14:04.974934
| 2023-04-18T02:27:15
| 2023-04-18T02:27:15
| 10,917,677
| 1,306
| 489
|
MIT
| 2023-06-03T23:24:50
| 2013-06-24T19:17:52
|
Python
|
UTF-8
|
Python
| false
| false
| 319
|
py
|
from pycoin.networks.bitcoinish import create_bitcoinish_network
network = create_bitcoinish_network(
symbol="RIC", network_name="Riecoin", subnet_name="mainnet",
wif_prefix_hex="80", address_prefix_hex="3c", pay_to_script_prefix_hex="05",
bip32_prv_prefix_hex="0488ade4", bip32_pub_prefix_hex="0488b21e")
|
[
"him@richardkiss.com"
] |
him@richardkiss.com
|
d689b65e58f4d60db8f65090a8b0e8b0bdbffa84
|
8546c0e45ea7a10e07d4e5f9f8123026ecb0e54e
|
/backend/ancient_art_29276/urls.py
|
93c0e60acd785ae79a01048668d4100015d480b4
|
[] |
no_license
|
crowdbotics-apps/ancient-art-29276
|
a799ab4abbbe0ab2583c661552d7095f25650413
|
e31537524839e28b1ed44ebddffe9ac95e47c582
|
refs/heads/master
| 2023-07-03T16:46:51.569667
| 2021-07-29T23:15:17
| 2021-07-29T23:15:17
| 390,872,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,229
|
py
|
"""ancient_art_29276 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Ancient Art"
admin.site.site_title = "Ancient Art Admin Portal"
admin.site.index_title = "Ancient Art Admin"
# swagger
api_info = openapi.Info(
title="Ancient Art API",
default_version="v1",
description="API documentation for Ancient Art App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
bdbf206496a0420317ec6c60a8d1a7b1c3577906
|
12cfd1678317f40ca778b8c65d7323637c8c358d
|
/2018腾讯广告算法大赛/team1_lgb&nffm/src/014_CV_cvr_select_p.py
|
7582b0de170d96bf099fdfb6df27df2bb75f6f8b
|
[] |
no_license
|
moonlight1776/competitions
|
e73d0ce358d2b5662b801820fd2f6434f2af4e2e
|
a372a2051a2a1580feb280ce6842a785459cf8d1
|
refs/heads/master
| 2022-03-21T09:31:20.276644
| 2019-11-09T05:37:46
| 2019-11-09T05:37:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,000
|
py
|
##筛选特征
col_new = ['cvr_of_aid_and_onehot7',
'cvr_of_age_and_onehot7',
'cvr_of_consumptionAbility_and_onehot1',
'cvr_of_aid_and_onehot3', 'cvr_of_advertiserId_and_onehot3',
'cvr_of_advertiserId_and_onehot1', 'cvr_of_gender_and_onehot2',
'cvr_of_campaignId_and_onehot7', 'cvr_of_campaignId_and_onehot3',
'cvr_of_adCategoryId_and_onehot3', 'cvr_of_creativeSize_and_onehot6',
'cvr_of_advertiserId_and_onehot10', 'cvr_of_campaignId_and_onehot1',
'cvr_of_age_and_onehot1', 'cvr_of_creativeSize_and_onehot5',
'cvr_of_aid_and_onehot5', 'cvr_of_creativeSize_and_onehot2',
'cvr_of_advertiserId_and_onehot6', 'cvr_of_age_and_onehot10',
'cvr_of_consumptionAbility_and_onehot7', 'cvr_of_age_and_onehot2',
'cvr_of_os_and_onehot4', 'cvr_of_age_and_onehot6',
'cvr_of_creativeSize_and_onehot3', 'cvr_of_advertiserId_and_onehot8',
'cvr_of_carrier_and_onehot4', 'cvr_of_adCategoryId_and_onehot2',
'cvr_of_creativeSize_and_onehot10', 'cvr_of_aid_and_onehot1',
'cvr_of_creativeSize_and_onehot7', 'cvr_of_campaignId_and_onehot5',
'cvr_of_advertiserId_and_onehot4', 'cvr_of_aid_and_onehot10',
'cvr_of_productId_and_onehot7', 'cvr_of_creativeSize_and_onehot8',
'cvr_of_aid_and_onehot6', 'cvr_of_productType_and_onehot9',
'cvr_of_advertiserId_and_onehot7',
'cvr_of_consumptionAbility_and_onehot4', 'cvr_of_advertiserId_and_onehot2']
from lightgbm import LGBMClassifier
import time
from sklearn.metrics import roc_auc_score
import pandas as pd
import numpy as np
import random
import warnings
warnings.filterwarnings("ignore")
print('Reading train...')
train_part_x = pd.DataFrame()
evals_x = pd.DataFrame()
train_part_y = pd.read_csv('data_preprocessing/train_part_y_p.csv',header=None)
evals_y = pd.read_csv('data_preprocessing/evals_y_p.csv',header=None)
for i in range(1,16):
train_part_x = pd.concat([train_part_x,pd.read_csv('data_preprocessing/train_part_x_CV_cvr_'+str(i)+'.csv')],axis=1)
evals_x = pd.concat([evals_x,pd.read_csv('data_preprocessing/evals_x_CV_cvr_'+str(i)+'.csv')],axis=1)
for co in evals_x.columns:
if co not in col_new:
del evals_x[co]
del train_part_x[co]
print(i)
print('train_part...')
train_part_x[col_new].to_csv('data_preprocessing/train_part_x_CV_cvr_select_p.csv',index=False)
print('evals...')
evals_x[col_new].to_csv('data_preprocessing/evals_x_CV_cvr_select_p.csv',index=False)
train_part_x = pd.DataFrame()
evals_x = pd.DataFrame()
test2_x = pd.DataFrame()
print('Reading test...')
for i in range(1,16):
test2_x = pd.concat([test2_x,pd.read_csv('data_preprocessing/test2_x_CV_cvr_'+str(i)+'.csv')],axis=1)
for co in test2_x.columns:
if co not in col_new:
del test2_x[co]
print(i)
print('test2...')
test2_x[col_new].to_csv('data_preprocessing/test2_x_CV_cvr_select_p.csv',index=False)
print('Over')
|
[
"327643958@qq.com"
] |
327643958@qq.com
|
e934a0bdc018a0da5a8ea763e93fcfd986997a79
|
0d2d3058feddb9be205427a142e0320821a0ca9c
|
/myapp/urls.py
|
42aae4734d65f266b9c79707e4e90aa112ccaba2
|
[] |
no_license
|
Emad-ahmed/Leading_university_website
|
76085d93df2f24940b9a2023873add6d26c044f3
|
db12faca90041c606972a7eccf8fb62701b26a88
|
refs/heads/main
| 2023-06-06T21:10:54.051778
| 2021-07-07T03:42:56
| 2021-07-07T03:42:56
| 383,501,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
from myapp.views.faculty import eng
from django.urls import path
from myapp.views import HomeView, shownotice, shownews, LoginView, SignupView, user_logout, EmailView, bba, cse, eng, StudentView
from django.contrib.auth import views as auth_views
from .forms import MyPasswordResetForm, MySetPasswordForm, MyPasswordChangeForm
urlpatterns = [
path('', HomeView.as_view(), name="home"),
path('user_login', LoginView.as_view(), name="user_login"),
path('student', StudentView.as_view(), name="student"),
path('user_logout', user_logout, name="user_logout"),
path('signup', SignupView.as_view(), name="signup"),
path('shownotice/<int:id>', shownotice, name="shownotice"),
path('shownews/<int:id>', shownews, name="shownews"),
path('mymail', EmailView.as_view(), name="mymail"),
path('bba', bba, name="bba"),
path('cse', cse, name="cse"),
path('eng', eng, name="eng"),
# Reset
path('password-reset/',
auth_views.PasswordResetView.as_view(template_name='password_reset.html', form_class=MyPasswordResetForm), name='password_reset'),
path('password-reset/done/',
auth_views.PasswordResetDoneView.as_view(template_name='password_reset_done.html'), name='password_reset_done'),
path('password-reset-confirm/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(template_name='password_reset_confirm.html', form_class=MySetPasswordForm), name='password_reset_confirm'),
path('password-reset-complete',
auth_views.PasswordResetCompleteView.as_view(template_name='password_reset_done.html'), name='password_reset_complete'),
path('passwordchange/', auth_views.PasswordChangeView.as_view(template_name='passwordchange.html',
form_class=MyPasswordChangeForm, success_url='/passwordchangedone/'), name='passwordchange'),
path('passwordchangedone/', auth_views.PasswordChangeDoneView.as_view(
template_name='passwordchangedone.html'), name='passwordchangedone'),
]
|
[
"amadahmed1234678@gmail.com"
] |
amadahmed1234678@gmail.com
|
ea9cf5a73ecf15898db5b9d376aa74ae41b4575a
|
812f9822ddbfc986f4f230a9e6814f22c7c50e2f
|
/branching/nested.py
|
baba8661b3a2dfe6f2a4379b635ee0c8de297c89
|
[] |
no_license
|
devopsvj/PythonAndMe
|
31b4aa9bade1431d6f13917122dc12bf6a118da6
|
0b1362023960b7c77c79856d4bdef0a58fec1446
|
refs/heads/master
| 2023-07-25T23:06:39.081191
| 2019-01-15T09:50:08
| 2019-01-15T09:50:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
#Program for if statements
#Author:K.Vani
#Date:16.12.1018
#Batch :Devops December 2018
print "Example for if satement"
print "----------------------"
x=input("Enter the Value for x:")
y=input("Enter the value for y:")
z=input("Enter the Value for z:")
a=input("Enter the value for a:")
"""if x>y and x>z and x>a :
print "x is greatest among the all"
else:
print "other number is greateri" """
if x>y and x>z and x>a :
print "x is greatest among the all"
elif y>x and y>z and y>a:
print "y is greatest"
elif z>x and z>y and z>a:
print "z is greatest"
else:
print "a is greatest"
'''if x>y:
if x>z:
if x>a:
print "X is greatest among all 4 numbers"
else:
print "x is not greater than a"
else:
print "X is not greater than z"
else:
print "X is not greater than y"'''
|
[
"vani_kani@hotmail.com"
] |
vani_kani@hotmail.com
|
8e05f8316c34e11f625d1119368230bc445d4dac
|
6e99c57261bcc5ba1f31bffc6ee1d854a00cdd56
|
/examples/anon-bucket-sort/bootstrap.py
|
f447c9c9c8a419ea35d419379552bf8f0ced50c9
|
[
"MIT"
] |
permissive
|
chrisseto/Still
|
5a0249b8eb28956b2be5538d07fb8091e3c54c49
|
3e4df26b824227472e5f487905779deafc76b4dd
|
refs/heads/master
| 2021-01-10T13:43:33.098299
| 2015-10-12T05:22:24
| 2015-10-12T05:22:24
| 43,833,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,316
|
py
|
import os
import json
import asyncio
from wdim import client
from wdim.client.permissions import Permissions
from wdim.orm import Storable
from wdim.orm import exceptions
from wdim.orm.database import MongoLayer
from wdim.orm.database import EmbeddedElasticSearchLayer
cards_loc = os.path.join(os.path.split(__file__)[0], 'cards.json')
async def main():
mongo_layer = await MongoLayer.connect()
es_layer = await EmbeddedElasticSearchLayer.connect()
assert await Storable.connect(mongo_layer)
assert await client.Document.connect(es_layer)
assert await client.Journal.connect(es_layer >> mongo_layer)
try:
ns = await client.Namespace.create(name='system')
except exceptions.UniqueViolation:
ns = await client.Namespace.get_by_name('system')
try:
collection = await ns.create_collection('users')
except exceptions.UniqueViolation:
pass
try:
ns = await client.Namespace.create(name='cardapp')
except exceptions.UniqueViolation:
ns = await client.Namespace.get_by_name('cardapp')
try:
collection = await ns.create_collection('placements')
except exceptions.UniqueViolation:
collection = await ns.get_collection('placements')
collection._fields['permissions'].__set__(collection, {'anon': Permissions.CREATE}, override=True)
await collection._DATABASE.upsert(collection)
try:
collection = await ns.create_collection('cards')
except exceptions.UniqueViolation:
collection = await ns.get_collection('cards')
collection._fields['permissions'].__set__(collection, {'*': Permissions.READ}, override=True)
await collection._DATABASE.upsert(collection)
with open(cards_loc) as cards:
for card in json.load(cards)['cards']:
try:
entry = await collection.read(card['id'])
blob = await entry.blob
assert blob.data['content'] == card['content']
except AssertionError:
await collection.update(card['id'], {'content': card['content']}, 'sys')
except exceptions.NotFound:
await collection.create(card['id'], {'content': card['content']}, 'sys')
if __name__ == '__main__':
loop = asyncio.get_event_loop().run_until_complete(main())
|
[
"chriskseto@gmail.com"
] |
chriskseto@gmail.com
|
f2c07af575636924d36d2f3874cd92e1c7c7d55f
|
8b8da03245af2d7dd3513bf4ba9a1cf9f4923275
|
/bingads/v12/bulk/entities/bulk_account.py
|
37f2eefd971a7145b4193bbb6a6a02f5e9f27f37
|
[
"MIT"
] |
permissive
|
dev0505/BingAds-Python-SDK
|
42233f4c41209da99ad3e5c494babdd2e13ead69
|
ec8e98f5ff1f472b2ca5801c4dc230039c5b83f9
|
refs/heads/master
| 2020-05-07T19:34:50.000451
| 2019-03-21T05:04:22
| 2019-03-21T05:04:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,325
|
py
|
from bingads.v12.internal.bulk.string_table import _StringTable
from bingads.v12.internal.bulk.entities.single_record_bulk_entity import _SingleRecordBulkEntity
from bingads.v12.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v12.internal.extensions import *
class BulkAccount(_SingleRecordBulkEntity):
""" Represents an account that can be read or written in a bulk file.
Properties of this class and of classes that it is derived from, correspond to fields of the Account record in a bulk file.
For more information, see Account at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self, account_id=None, customer_id=None, sync_time=None):
super(BulkAccount, self).__init__()
self._id = account_id
self._customer_id = customer_id
self._sync_time = sync_time
self._msclkid_auto_tagging_enabled = None
self._tracking_url_template = None
@property
def id(self):
""" The identifier of the account.
Corresponds to the 'Id' field in the bulk file.
:return: The identifier of the account.
:rtype: int
"""
return self._id
@property
def customer_id(self):
""" The identifier of the customer that contains the account.
Corresponds to the 'Parent Id' field in the bulk file.
:return: The identifier of the customer that contains the account.
:rtype: int
"""
return self._customer_id
@property
def sync_time(self):
""" The date and time that you last synced your account using the bulk service.
You should keep track of this value in UTC time.
Corresponds to the 'Sync Time' field in the bulk file.
:return: The date and time that you last synced your account using the bulk service.
:rtype: datetime.datetime
"""
return self._sync_time
@property
def msclkid_auto_tagging_enabled(self):
""" Determines whether auto-tagging of the MSCLKID query string parameter is enabled. The MSCLKID is a 32-character GUID that is unique for each ad click.
:return: The msclkid autotag setting of the account
:rtype: bool
"""
return self._msclkid_auto_tagging_enabled
@property
def tracking_url_template(self):
""" The tracking template to use as a default for all URLs in your account.
:return: The tracking template of the account
:rtype: str
"""
return self._tracking_url_template
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.Id,
field_to_csv=lambda c: bulk_str(c.id),
csv_to_field=lambda c, v: setattr(c, '_id', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.ParentId,
field_to_csv=lambda c: bulk_str(c.customer_id),
csv_to_field=lambda c, v: setattr(c, '_customer_id', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.SyncTime,
field_to_csv=lambda c: bulk_datetime_str(c.sync_time),
csv_to_field=lambda c, v: setattr(c, '_sync_time', parse_datetime(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.MSCLKIDAutoTaggingEnabled,
field_to_csv=lambda c: bulk_str(c.msclkid_auto_tagging_enabled),
csv_to_field=lambda c, v: setattr(c, '_msclkid_auto_tagging_enabled', parse_bool(v))
),
_SimpleBulkMapping(
header=_StringTable.TrackingTemplate,
field_to_csv=lambda c: bulk_str(c.tracking_url_template),
csv_to_field=lambda c, v: setattr(c, '_tracking_url_template', v)
),
]
def process_mappings_from_row_values(self, row_values):
row_values.convert_to_entity(self, BulkAccount._MAPPINGS)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self.convert_to_values(row_values, BulkAccount._MAPPINGS)
def read_additional_data(self, stream_reader):
super(BulkAccount, self).read_additional_data(stream_reader)
|
[
"qitia@microsoft.com"
] |
qitia@microsoft.com
|
2965fe1e208f712e3e99e3ebf5778e835be9fd11
|
910c97ce255f39af7ef949664b4346e8cb5d6a0e
|
/monitorcenter/monitor/mirror/disk.py
|
b149464a149accec9348522008b7e46f181d867e
|
[] |
no_license
|
sun3shines/manager_monitor
|
f3742a4fde95b456f51d0a18feb78f3d4048c560
|
f49d741203d8476f2249a49d90fecc86143ac622
|
refs/heads/master
| 2021-01-17T06:47:14.375088
| 2016-04-29T06:43:05
| 2016-04-29T06:43:05
| 57,361,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,143
|
py
|
# -*- coding: utf-8 -*-
from managerlib.db.table.dynamic.stat_disk import Disk,insert_disk,update_disk
from managerlib.db.table.dynamic.stat_disk import hid2attrs as DISKhid2attrs
from monitorcenter.monitor.mirror.base import MirrorBase
class MirrorDisk(MirrorBase):
def __init__(self,db,hid):
super(MirrorDisk, self).__init__(db,hid)
@property
def hid2attrs(self):
return DISKhid2attrs
@property
def getClass(self):
return Disk
@property
def emptyObject(self):
return {self.c.hid:self.hid,self.c.id:None,
self.c.timestamp:None,self.c.disk_read_per_sec:None,
self.c.disk_write_per_sec:None,self.c.seq:None}
def insert_db(self,attr):
print 'disk currentseq',self.currentseq
print 'disk currentindex',self.currentindex
timestamp = attr.get(self.c.timestamp)
disk_read_per_sec = attr.get(self.c.disk_read_per_sec)
disk_write_per_sec = attr.get(self.c.disk_write_per_sec)
insert_disk(self.db, self.hid, timestamp, disk_read_per_sec,
disk_write_per_sec,self.currentseq)
def update_db(self,attr,mirror_attr):
pass
print 'disk currentseq',self.currentseq
print 'disk currentindex',self.currentindex
timestamp = attr.get(self.c.timestamp)
disk_read_per_sec = attr.get(self.c.disk_read_per_sec)
disk_write_per_sec = attr.get(self.c.disk_write_per_sec)
cid = mirror_attr.get(self.c.id)
update_disk(self.db, cid, timestamp, disk_read_per_sec,
disk_write_per_sec,self.currentseq)
def update_mirror(self,attr,mirror_attr):
pass
timestamp = attr.get(self.c.timestamp)
disk_read_per_sec = attr.get(self.c.disk_read_per_sec)
disk_write_per_sec = attr.get(self.c.disk_write_per_sec)
mirror_attr.update({self.c.timestamp:timestamp,self.c.disk_read_per_sec:disk_read_per_sec,
self.c.disk_write_per_sec:disk_write_per_sec,self.c.seq:self.currentseq})
|
[
"sun__shines@163.com"
] |
sun__shines@163.com
|
c928d652311163cd67c3071a7f3513d7ce86b16b
|
8ef8e6818c977c26d937d09b46be0d748022ea09
|
/nlp/cloze_test/glm/pytorch/GLMForMultiTokenCloze/base/train/event/log.py
|
ca169d8b4fe1aecee27cdc806aa66540ed65822d
|
[
"Apache-2.0"
] |
permissive
|
Deep-Spark/DeepSparkHub
|
eb5996607e63ccd2c706789f64b3cc0070e7f8ef
|
9d643e88946fc4a24f2d4d073c08b05ea693f4c5
|
refs/heads/master
| 2023-09-01T11:26:49.648759
| 2023-08-25T01:50:18
| 2023-08-25T01:50:18
| 534,133,249
| 7
| 6
|
Apache-2.0
| 2023-03-28T02:54:59
| 2022-09-08T09:07:01
|
Python
|
UTF-8
|
Python
| false
| false
| 5,275
|
py
|
import copy
import inspect
import os
import os.path as ospath
from typing import Tuple, Union, Iterable
from torch import Tensor
from torch.cuda.amp import GradScaler
from torch.optim import Optimizer
from config.config_manager import get_properties_from_config
from utils.logging import PerfLogger, LogEvent, PerfLogLevel
from .base import BERT_MODEL
from .base_adapter import BaseTrainingEventAdapter
# Stack depth passed to logger.log — presumably so reported call sites skip
# these adapter wrapper frames and point at user code; TODO confirm.
STACKLEVEL = 4
class TrainingLogger(BaseTrainingEventAdapter):
    """Training-event adapter that forwards lifecycle events to a PerfLogger.

    Verbosity is driven by ``log_freq``: a positive value selects INFO level
    and enables per-step logging every ``log_freq`` steps; otherwise only
    submission-level events are emitted.
    """

    def __init__(self, config, logger: PerfLogger=None, log_freq: int = 0):
        super(TrainingLogger, self).__init__(config)
        self.config = config
        self.log_freq = log_freq
        # Positive log_freq => verbose INFO logging, else submission-level only.
        level = PerfLogLevel.INFO if log_freq > 0 else PerfLogLevel.SUBMITTION
        if logger is None:
            logger = PerfLogger.get_default_logger(rank=config.local_rank, level=level)
        self.logger = logger
        # Filled in by launch() once the config path has been parsed.
        self.model = None
        self.submitter = None

    def launch(self):
        """Emit the launch event and initialize the logger with config metadata.

        Derives submitter/model names from the config file path, which is
        expected to look like ``/path/to/proj/submitter/model/config/config_xxx.py``.
        """
        self.logger.log(LogEvent.launch_training, "Launch training", stacklevel=STACKLEVEL)
        config_path: str = self.config.config
        config_dict = get_properties_from_config(self.config)
        # Stringify values that would not serialize cleanly (non-scalar,
        # non-iterable objects).
        for key, value in config_dict.items():
            if type(value) not in [int, float, str, bool] and not isinstance(value, Iterable):
                config_dict[key] = str(value)

        # Extract definition of training event as "path/to/module:ClassName";
        # fall back to str() if introspection fails for any reason.
        try:
            training_event_class = self.config.training_event
            if not inspect.isclass(training_event_class):
                training_event_class = training_event_class.__class__
            training_event_class_define = inspect.getabsfile(training_event_class)
            training_event_class_define = training_event_class_define.rsplit(".py", maxsplit=1)[0]
            training_event_class_define += ":" + training_event_class.__name__
        except:  # NOTE(review): bare except — intentionally best-effort here.
            training_event_class_define = str(self.config.training_event)
        config_dict['training_event'] = training_event_class_define

        # Like /path/to/proj/submitter/model/config/config_xxx.py
        if config_path.startswith("."):
            config_path = ospath.abspath(config_path)
        config_path_nodes = config_path.rsplit(sep="/", maxsplit=4)
        submitter = config_path_nodes[1]
        model = config_path_nodes[2]
        self.logger.init_logger(submitter=submitter,
                                model=model,
                                config_path=config_path,
                                config=config_dict,
                                stacklevel=STACKLEVEL)
        self.model = model
        self.submitter = submitter

    def convert_model(self, model: BERT_MODEL):
        """Log the concrete class of the model being converted."""
        model_class = type(model)
        model_info = dict(
            type = model_class.__name__,
            module = model_class.__module__ if hasattr(model_class, "__module__") else "None"
        )
        self._log_event(LogEvent.convert_model, model_info)

    def create_optimizer(self, optimizer: Optimizer):
        """Log the concrete class of the optimizer."""
        optimizer_class = type(optimizer)
        optimizer_info = dict(
            type=optimizer_class.__name__,
            module=optimizer_class.__module__ if hasattr(optimizer_class, "__module__") else "None"
        )
        self._log_event(LogEvent.create_optimizer, optimizer_info)

    def model_to_fp16(self, model: BERT_MODEL):
        """Log whether fp16 is enabled in the config (defaults to False)."""
        fp16_info = dict(
            fp16 = self.config.fp16 if hasattr(self.config, "fp16") else False
        )
        self._log_event(LogEvent.model_to_fp16, fp16_info)

    def model_to_ddp(self, model: BERT_MODEL):
        # NOTE(review): falls back to None here but to the string "None" in
        # convert_model — likely an unintended inconsistency.
        model_class = type(model)
        model_info = dict(
            type=model_class.__name__,
            module=model_class.__module__ if hasattr(model_class, "__module__") else None
        )
        self._log_event(LogEvent.model_to_ddp, model_info)

    def on_init_evaluate(self, result: dict):
        """Log the evaluation result produced before training starts."""
        self._log_event(LogEvent.init_evaluation, result)

    def on_evaluate(self, result: dict):
        """Log an in-training evaluation result."""
        self._log_event(LogEvent.evaluation, result)

    def on_init_start(self):
        self._log_event(LogEvent.init_start)

    def on_init_end(self):
        self._log_event(LogEvent.init_end, "Finish initialization")

    def on_backward(self, setp, lm_loss, reduced_loss, optimizer, lr_scheduler):
        # Intentionally not logged.  NOTE(review): ``setp`` looks like a typo
        # for ``step``, but renaming it would change the keyword interface.
        pass

    def on_train_begin(self):
        self._log_event(LogEvent.train_begin)

    def on_train_end(self):
        self._log_event(LogEvent.train_end)

    def on_epoch_begin(self, epoch: int):
        epoch_info = dict(epoch=epoch)
        self._log_event(LogEvent.epoch_begin, epoch_info)

    def on_epoch_end(self, epoch: int):
        epoch_info = dict(epoch=epoch)
        self._log_event(LogEvent.epoch_end, epoch_info)

    def on_step_begin(self, step: int):
        pass

    def on_step_end(self, step: int, result: dict=None):
        # Log step 1 unconditionally; afterwards only every log_freq steps
        # (and never when per-step logging is disabled, log_freq <= 0).
        if (self.log_freq <= 0 or step % self.log_freq != 0) and step != 1:
            return
        if result is None:
            step_info = dict()
        else:
            # Shallow-copy so the caller's dict is not mutated by the
            # 'step' key added below.
            step_info = copy.copy(result)
        step_info['step'] = step
        self._log_event(LogEvent.step_end, step_info)

    def _log_event(self, event, *args, **kwargs):
        """Forward an event to the underlying PerfLogger with the fixed stacklevel."""
        self.logger.log(event, stacklevel=STACKLEVEL, *args, **kwargs)
|
[
"jia.guo@iluvatar.ai"
] |
jia.guo@iluvatar.ai
|
3048a911894df5b04b0c4d54932f486ff3601d6e
|
6bc42771a987b503df1866ad3fb8cf3126805806
|
/examples/plot_transforms.py
|
760cabe6244bdbc69117042c21ae3f85915a1207
|
[
"Apache-2.0"
] |
permissive
|
anchandm/fooof
|
82c479ebd08e154cb1a3feedf60804ede81a76cb
|
dcc93b14c4a6987ce7e394696af3221dd2a7bbd6
|
refs/heads/master
| 2020-05-02T07:51:03.064334
| 2019-03-20T03:06:19
| 2019-03-20T03:06:19
| 177,828,211
| 1
| 0
|
Apache-2.0
| 2019-03-26T16:30:30
| 2019-03-26T16:30:29
| null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
"""
Transforming Power Spectra
==========================
Apply transformations to power spectra.
"""
###################################################################################################
# Imports
from fooof.synth.gen import gen_power_spectrum
from fooof.synth.transform import rotate_spectrum
from fooof.plts.spectra import plot_spectra
###################################################################################################
# Generate a synthetic power spectrum
fs, ps = gen_power_spectrum([3, 40], [1, 1], [10, 0.5, 1])
###################################################################################################
# rotate_spectrum
# ---------------
#
# The :func:`rotate_spectrum` function takes in a power spectrum, and rotates the
# power spectrum a specified amount, around a specified frequency point, changing
# the aperiodic exponent of the spectrum.
#
###################################################################################################
# Rotate the power spectrum
nps = rotate_spectrum(fs, ps, 0.25, 20)
###################################################################################################
# Plot the two power spectra
plot_spectra(fs, [ps, nps], log_freqs=True, log_powers=True)
|
[
"tdonoghue@ucsd.edu"
] |
tdonoghue@ucsd.edu
|
7e98137ff59ad45092492d5017693683fffe0cc2
|
0e51815b524229c7562572f10d178eb446cb7715
|
/project18/app18/apps.py
|
2929b85d78d69c8157c514f030b31065a05443dd
|
[] |
no_license
|
Eswar0407/Django-10Am
|
4d38cc795d7233bf5b6d1792755b018584a7c627
|
c668b96dd69c72daba09bc944134302073827fae
|
refs/heads/master
| 2023-07-16T23:52:52.820204
| 2021-09-05T17:57:14
| 2021-09-05T17:57:14
| 374,605,139
| 0
| 0
| null | 2021-06-13T16:20:40
| 2021-06-07T09:16:52
|
Python
|
UTF-8
|
Python
| false
| false
| 142
|
py
|
from django.apps import AppConfig
class App18Config(AppConfig):
    """Django application configuration for the ``app18`` app."""

    name = 'app18'
    default_auto_field = 'django.db.models.BigAutoField'
|
[
"eswarbablu@gmail.com"
] |
eswarbablu@gmail.com
|
f8ddf2cb07624eabe3a281e2d346f7452b28d233
|
80831d77ef6fc3b485be80501b73ccb30ce5e444
|
/networkapi/api_equipment/tests/sanity/test_equipment_get.py
|
56e5f87ba6ac8d169743fd61137b14ced95d0430
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
globocom/GloboNetworkAPI
|
e2fdf5a9e6070359e90801bf3e45c2d499f199c5
|
eb27e1d977a1c4bb1fee8fb51b8d8050c64696d9
|
refs/heads/master
| 2023-06-25T21:34:04.923940
| 2023-05-29T12:07:20
| 2023-05-29T12:07:20
| 22,734,387
| 86
| 74
|
Apache-2.0
| 2023-05-29T12:07:21
| 2014-08-07T19:47:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
# -*- coding: utf-8 -*-
import logging
from django.test.client import Client
from networkapi.test.test_case import NetworkApiTestCase
log = logging.getLogger(__name__)
class EquipmentGetTestCase(NetworkApiTestCase):
    """Sanity tests for GET requests on the /api/v3/equipment/ endpoint."""

    fixtures = [
        'networkapi/system/fixtures/initial_variables.json',
        'networkapi/usuario/fixtures/initial_usuario.json',
        'networkapi/grupo/fixtures/initial_ugrupo.json',
        'networkapi/usuario/fixtures/initial_usuariogrupo.json',
        'networkapi/grupo/fixtures/initial_permissions.json',
        'networkapi/grupo/fixtures/initial_permissoes_administrativas.json',
        'networkapi/api_equipment/fixtures/initial_pre_equipment.json',
        'networkapi/api_equipment/fixtures/initial_base.json',
    ]

    def setUp(self):
        self.client = Client()

    def tearDown(self):
        pass

    def test_get_success_list_equipment(self):
        """
        Test of success to get equipment list
        """
        auth = self.get_http_authorization('test')
        response = self.client.get(
            '/api/v3/equipment/',
            content_type='application/json',
            HTTP_AUTHORIZATION=auth)

        status = response.status_code
        self.assertEqual(
            200,
            status,
            'Status code should be 200 and was %s' % status
        )
|
[
"ederson.brilhante@corp.globo.com"
] |
ederson.brilhante@corp.globo.com
|
891121b9606027367e79edf296778c24e1863184
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Difference/trend_ConstantTrend/cycle_12/ar_12/test_artificial_1024_Difference_ConstantTrend_12_12_20.py
|
255ad8d8fc80ccb9aa8b7a9077af1162adb8c95e
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 272
|
py
|
# Generated benchmark case for pyaf: 1024 daily points, constant trend,
# cycle length 12, AR order 12, a Difference transform, zero noise (sigma=0)
# and 20 exogenous series.
import pyaf.Bench.TS_datasets as tsds  # NOTE(review): imported but unused here.
import tests.artificial.process_artificial_dataset as art

art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 12, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
53aa879f095b39f36320336ad01e799973484d25
|
2d0bada349646b801a69c542407279cc7bc25013
|
/docsrc/source/doxygen/pydoc/execute_async.py
|
782a192a068fbb6bb04256d4ec4f587d475fa660
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
Xilinx/Vitis-AI
|
31e664f7adff0958bb7d149883ab9c231efb3541
|
f74ddc6ed086ba949b791626638717e21505dba2
|
refs/heads/master
| 2023-08-31T02:44:51.029166
| 2023-07-27T06:50:28
| 2023-07-27T06:50:28
| 215,649,623
| 1,283
| 683
|
Apache-2.0
| 2023-08-17T09:24:55
| 2019-10-16T21:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 472
|
py
|
"""!
@brief Executes the runner. This is a blocking function.
@param inputs : List[vart.TensorBuffer], A list of vart.TensorBuffer containing the input data for inference.
@param outputs : List[vart.TensorBuffer], A list of vart.TensorBuffer which will be filled with output data.
@return tuple[jobid, status] status 0 for exit successfully, others for customized warnings or errors.
"""
def execute_async( inputs, outputs):
pass
|
[
"do-not-reply@gitenterprise.xilinx.com"
] |
do-not-reply@gitenterprise.xilinx.com
|
7a88d8fa4488ba5fa2c2b4c39da14e4c4686d172
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2796/60692/251559.py
|
a8bf87a7e07b8477d2a22079daf8e9c61c1ab118
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
# Read the element count (consumed but otherwise unused) followed by the
# space-separated integer values.
nums = int(input())
list1 = [int(token) for token in input().split(" ")]
def isPower(num):
    """Return True if ``num`` is a perfect square (num == k*k for some integer k >= 0).

    Binary-searches the candidate root over [0, num] inclusive.  The previous
    version searched [-num, num) with an exclusive-bound loop, which wrongly
    reported 0 and 1 as non-squares.  Negative numbers return False.
    """
    low = 0
    high = num
    while low <= high:
        mid = (low + high) // 2
        square = mid * mid
        if square < num:
            low = mid + 1
        elif square > num:
            high = mid - 1
        else:
            return True
    return False
# Print the largest value that is not a perfect square.
# NOTE(review): max_num starts as list1[0], so if every value is a perfect
# square the first element is printed regardless.
max_num = list1[0]
for value in list1:
    if value > max_num and not isPower(value):
        max_num = value
print(max_num)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
d690476d5c1f08c02cc4bbdc52aee63f034af167
|
aaa204ad7f134b526593c785eaa739bff9fc4d2a
|
/airflow/providers/vertica/operators/vertica.py
|
50463a7771ececc01a00331495f8974e4a7ac3e7
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
cfei18/incubator-airflow
|
913b40efa3d9f1fdfc5e299ce2693492c9a92dd4
|
ffb2078eb5546420864229cdc6ee361f89cab7bd
|
refs/heads/master
| 2022-09-28T14:44:04.250367
| 2022-09-19T16:50:23
| 2022-09-19T16:50:23
| 88,665,367
| 0
| 1
|
Apache-2.0
| 2021-02-05T16:29:42
| 2017-04-18T20:00:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,050
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Iterable, Sequence
from airflow.models import BaseOperator
from airflow.providers.vertica.hooks.vertica import VerticaHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class VerticaOperator(BaseOperator):
    """
    Executes sql code in a specific Vertica database.

    :param vertica_conn_id: reference to a specific Vertica database
    :param sql: the SQL code to be executed as a single string, or
        a list of str (sql statements), or a reference to a template file.
        Template references are recognized by str ending in '.sql'
    """

    # Fields rendered through Airflow's templating engine.
    template_fields: Sequence[str] = ('sql',)
    template_ext: Sequence[str] = ('.sql',)
    template_fields_renderers = {'sql': 'sql'}
    ui_color = '#b4e0ff'

    def __init__(
        self, *, sql: str | Iterable[str], vertica_conn_id: str = 'vertica_default', **kwargs: Any
    ) -> None:
        super().__init__(**kwargs)
        self.sql = sql
        self.vertica_conn_id = vertica_conn_id

    def execute(self, context: Context) -> None:
        statements = self.sql
        self.log.info('Executing: %s', statements)
        hook = VerticaHook(vertica_conn_id=self.vertica_conn_id, log_sql=False)
        hook.run(sql=statements)
|
[
"noreply@github.com"
] |
cfei18.noreply@github.com
|
7bb26aa80818a9ed70dcae325f932f5178923f9a
|
2a4ad073755ff447926e44b7c2e0b56b5ded37d2
|
/NowCoder/36S_两个链表的第一个公共结点.py
|
52164b5ccc6a09928f661889ab429d8a262f6a87
|
[] |
no_license
|
wcb2213/Learning_notes
|
3a9b3fdb7df5c6844a9031db8dd7e9dd858e093c
|
d481e1754c15c91557027bee872f4d97da3c0fca
|
refs/heads/master
| 2021-07-06T15:54:56.199655
| 2020-09-04T14:05:50
| 2020-09-04T14:05:50
| 174,832,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
#!/usr/bin/env/ python
# -*- coding:utf-8 -*-
# Created by: Vanish
# Created on: 2019/3/22
# 24ms
# 输入两个链表,找出它们的第一个公共结点。
# 公共节点具有相同的地址
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def FindFirstCommonNode(self, pHead1, pHead2):
        """Return the first node shared by both linked lists (None if disjoint).

        Two pointers walk the lists in lockstep; when one falls off the end it
        restarts at the head of the *other* list.  Both pointers therefore
        traverse len1 + len2 nodes and meet at the junction — or both become
        None together when the lists never intersect.
        """
        walker_a, walker_b = pHead1, pHead2
        while walker_a != walker_b:
            walker_a = walker_a.next if walker_a else pHead2
            walker_b = walker_b.next if walker_b else pHead1
        return walker_a
|
[
"wcb2213@163.com"
] |
wcb2213@163.com
|
d2e724651959af6b3b9bbfed7c6ca532e920040d
|
c9d5ca04f42226d3c6b10f2318aebacfb43cfda7
|
/tests/test_algebra/test_vector.py
|
3ff9a7d064ee6f944d468e6db8a6a538ef3349e5
|
[
"MIT"
] |
permissive
|
ioztelli/ezdxf
|
cfb81deb7b666fa4e0ed32f1b0753fbe5faa57ee
|
59605dc34c44fe4ad3a8162d6520a3b4776582b4
|
refs/heads/master
| 2020-04-21T18:37:37.022364
| 2018-11-28T12:56:19
| 2018-11-28T12:56:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,141
|
py
|
import sys
import pytest
import math
from ezdxf.algebra.vector import Vector, is_close
# True when running under Python 3; used below to skip the __bool__ test
# (Python 2 objects use __nonzero__ instead).
PY3 = sys.version_info.major > 2


# --- Construction ---------------------------------------------------------

def test_init_no_params():
    # No arguments -> the zero vector.
    v = Vector()
    assert v == (0, 0, 0)
    assert v == Vector()


def test_init_one_param():
    # A single iterable argument supplies the components.
    v = Vector((2, 3))
    assert v == (2, 3)  # z is 0.
    v = Vector((2, 3, 4))
    assert v == (2, 3, 4)  # all three components come from the tuple


def test_init_two_params():
    v = Vector(1, 2)
    assert v == (1, 2)  # z is 0.
    v = Vector(5, 6, 7) - Vector(1, 1, 1)
    assert v == (4, 5, 6)
    # from_deg_angle builds a unit (or scaled) vector from a polar angle.
    v = Vector.from_deg_angle(0)
    assert v == (1, 0)
    length, angle = 7, 45
    v = Vector.from_deg_angle(angle, length)
    x = math.cos(math.radians(angle)) * length
    y = math.sin(math.radians(angle)) * length
    assert v == (x, y)


def test_init_three_params():
    v = Vector(1, 2, 3)
    assert v == (1, 2, 3)
# --- Component access, iteration, copying ---------------------------------

def test_from_angle():
    angle = math.radians(50)
    length = 3.
    assert Vector.from_rad_angle(angle, length) == (math.cos(angle) * length, math.sin(angle) * length, 0)


def test_vector_as_tuple():
    # Vectors support indexing, slicing (returning plain tuples) and .xyz.
    v = Vector(1, 2, 3)
    assert v[0] == 1
    assert v[1] == 2
    assert v[2] == 3

    assert tuple(v) == (1, 2, 3)
    assert isinstance(v[:2], tuple)
    assert v[:2] == (1, 2)
    assert v[1:] == (2, 3)
    assert isinstance(v.xyz, tuple)
    assert v.xyz == (1, 2, 3)


def test_iter():
    assert sum(Vector(1, 2, 3)) == 6


def test_deep_copy():
    # Vectors behave as immutable values: shallow copy shares the object,
    # deep copy produces a distinct (but equal) object per occurrence.
    import copy

    v = Vector(1, 2, 3)
    l1 = [v, v, v]
    l2 = copy.copy(l1)
    assert l2[0] is l2[1]
    assert l2[1] is l2[2]
    assert l2[0] is v

    l3 = copy.deepcopy(l1)
    assert l3[0] is l3[1]
    assert l3[1] is l3[2]
    assert l3[0] is not v
# --- Angles, comparison, null checks --------------------------------------

def test_get_angle():
    v = Vector(3, 3)
    assert is_close(v.angle_deg, 45)
    assert is_close(v.angle_rad, math.radians(45))


def test_spatial_angle():
    v = Vector(3, 3, 0)
    assert is_close(v.spatial_angle_deg, 45)
    assert is_close(v.spatial_angle_rad, math.radians(45))


def test_compare_vectors():
    v1 = Vector(1, 2, 3)
    # compare to tuple
    assert v1 == (1, 2, 3)
    # compare tuple to vector
    assert (1, 2, 3) == v1

    v2 = Vector(2, 3, 4)
    assert v2 > v1
    assert v1 < v2


def test_xy():
    assert Vector(1, 2, 3).xy == Vector(1, 2)


def test_is_null():
    v = Vector()
    assert v.is_null

    # NOTE(review): v1/v2 and result are computed but never asserted on —
    # presumably `assert result.is_null` was intended; confirm before adding.
    v1 = Vector(23.56678, 56678.56778, 2.56677) * (1.0 / 14.5667)
    v2 = Vector(23.56678, 56678.56778, 2.56677) / 14.5667
    result = v2 - v1
    assert Vector(0, 0, 0).is_null


@pytest.mark.skipif(not PY3, reason="__bool__ not supported")
def test_bool():
    v = Vector()
    assert bool(v) is False

    v1 = Vector(23.56678, 56678.56778, 2.56677) * (1.0 / 14.5667)
    v2 = Vector(23.56678, 56678.56778, 2.56677) / 14.5667
    result = v2 - v1
    assert bool(result) is False
    # actual precision is abs_tol=1e-9
    assert not Vector(1e-8, 0, 0).is_null
# --- Magnitude, normalization and arithmetic operators --------------------
# These cover abs/magnitude, scalar and vector forms of +, -, *, / including
# the in-place and reflected variants.

def test_magnitude():
    v = Vector(3, 4, 5)
    assert is_close(abs(v), 7.0710678118654755)
    assert is_close(v.magnitude, 7.0710678118654755)


def test_magnitude_square():
    v = Vector(3, 4, 5)
    assert is_close(v.magnitude_square, 50)


def test_normalize():
    v = Vector(2, 0, 0)
    assert v.normalize() == (1, 0, 0)


def test_normalize_to_length():
    v = Vector(2, 0, 0)
    assert v.normalize(4) == (4, 0, 0)


def test_orthogonal_ccw():
    v = Vector(3, 4)
    assert v.orthogonal() == (-4, 3)


def test_orthogonal_cw():
    v = Vector(3, 4)
    assert v.orthogonal(False) == (4, -3)


def test_negative():
    v = Vector(2, 3, 4)
    assert -v == (-2, -3, -4)


def test_add_scalar():
    v = Vector(2, 3, 4)
    assert v + 3 == (5, 6, 7)


def test_iadd_scalar():
    v = Vector(2, 3, 4)
    v += 3
    assert v == (5, 6, 7)


def test_sub_scalar():
    v = Vector(2, 3, 4)
    assert v - 3 == (-1, 0, 1)


def test_isub_scalar():
    v = Vector(2, 3, 4)
    v -= 3
    assert v == (-1, 0, 1)


def test_add_vector():
    v = Vector(2, 3, 4)
    assert v + (7, 7, 7) == (9, 10, 11)


def test_iadd_vector():
    v = Vector(2, 3, 4)
    v += (7, 7, 7)
    assert v == (9, 10, 11)


def test_radd_vector():
    v = Vector(2, 3, 4)
    assert (7, 7, 7) + v == (9, 10, 11)


def test_sub_vector():
    v = Vector(2, 3, 4)
    assert v - (7, 7, 7) == (-5, -4, -3)


def test_isub_vector():
    v = Vector(2, 3, 4)
    v -= (7, 7, 7)
    assert v == (-5, -4, -3)


def test_rsub_vector():
    v = Vector(2, 3, 4)
    assert (7, 7, 7) - v == (5, 4, 3)


def test_mul_scalar():
    v = Vector(2, 3, 4)
    assert v * 2 == (4, 6, 8)


def test_imul_scalar():
    v = Vector(2, 3, 4)
    v *= 2
    assert v == (4, 6, 8)


def test_rmul_scalar():
    v = Vector(2, 3, 4)
    assert 2 * v == (4, 6, 8)


def test_div_scalar():
    v = Vector(2, 3, 4)
    assert v / 2 == (1, 1.5, 2)


def test_idiv_scalar():
    v = Vector(2, 3, 4)
    v /= 2
    assert v == (1, 1.5, 2)


def test_rdiv_scalar():
    # Element-wise reflected division; comparison presumably tolerates the
    # truncated 0.66666666667 via the Vector equality's closeness check.
    v = Vector(2, 3, 4)
    assert 2 / v == (1, 0.66666666667, 0.5)
# --- Products, rotation, interpolation and replacement --------------------

def test_dot_product():
    v1 = Vector(2, 7, 1)
    v2 = Vector(3, 9, 8)
    assert is_close(v1.dot(v2), 77)


def test_angle_deg():
    assert is_close(Vector(0, 1).angle_deg, 90)
    assert is_close(Vector(0, -1).angle_deg, -90)
    assert is_close(Vector(1, 1).angle_deg, 45)
    assert is_close(Vector(-1, 1).angle_deg, 135)


def test_angle_between():
    v1 = Vector(0, 1)
    v2 = Vector(1, 1)
    angle = v1.angle_between(v2)
    assert is_close(angle, math.pi / 4)
    # reverse order, same result
    angle = v2.angle_between(v1)
    assert is_close(angle, math.pi / 4)


def test_cross_product():
    v1 = Vector(2, 7, 9)
    v2 = Vector(3, 9, 1)
    assert v1.cross(v2) == (-74, 25, -3)


def test_rot_z():
    # 90° rotation about the z axis leaves z untouched.
    assert Vector(2, 2, 7).rot_z_deg(90) == (-2, 2, 7)


def test_lerp():
    # Linear interpolation: factor 0 -> start, 1 -> end, .5 -> midpoint.
    v1 = Vector(1, 1, 1)
    v2 = Vector(4, 4, 4)
    assert v1.lerp(v2, .5) == (2.5, 2.5, 2.5)
    assert v1.lerp(v2, 0) == (1, 1, 1)
    assert v1.lerp(v2, 1) == (4, 4, 4)


def test_replace():
    # replace() returns a new vector with the named components substituted.
    v = Vector(1, 2, 3)
    assert v.replace(x=7) == (7, 2, 3)
    assert v.replace(y=7) == (1, 7, 3)
    assert v.replace(z=7) == (1, 2, 7)
    assert v.replace(x=7, z=7) == (7, 2, 7)
|
[
"mozman@gmx.at"
] |
mozman@gmx.at
|
26d60373ba20e7c396d6b3c3eb3d41e21eca9b0c
|
284f4f56aed56573eb5516aa67c99bf41e595522
|
/Leetcode/Queue & Stack/p1337.py
|
d64c819ab7d4db49c53ed975f32e633aa2a8b4c6
|
[] |
no_license
|
rohangoli/PythonAdvanced
|
537a05eff9ec305a6ec32fa2d0962a64976cd097
|
6448a5f0d82c7e951b5e476638e15a3c34966cd9
|
refs/heads/develop
| 2023-07-20T04:33:50.764104
| 2023-07-14T04:04:18
| 2023-07-14T04:04:18
| 126,811,520
| 0
| 0
| null | 2022-06-10T23:07:10
| 2018-03-26T10:20:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,649
|
py
|
## Design Circular Queue
# Example 1:
# Input
# ["MyCircularQueue", "enQueue", "enQueue", "enQueue", "enQueue", "Rear", "isFull", "deQueue", "enQueue", "Rear"]
# [[3], [1], [2], [3], [4], [], [], [], [4], []]
# Output
# [null, true, true, true, false, 3, true, true, true, 4]
# Explanation
# MyCircularQueue myCircularQueue = new MyCircularQueue(3);
# myCircularQueue.enQueue(1); // return True
# myCircularQueue.enQueue(2); // return True
# myCircularQueue.enQueue(3); // return True
# myCircularQueue.enQueue(4); // return False
# myCircularQueue.Rear(); // return 3
# myCircularQueue.isFull(); // return True
# myCircularQueue.deQueue(); // return True
# myCircularQueue.enQueue(4); // return True
# myCircularQueue.Rear(); // return 4
## Rohan's Implementation
class MyCircularQueue:
    """Ring buffer of fixed size ``k`` tracked by explicit head/tail indices.

    ``head``/``tail`` are both None while the queue is empty; otherwise they
    are the indices of the front and rear elements.  ``debug`` gates verbose
    tracing prints.
    """

    def __init__(self, k: int):
        self.arr = [None for _ in range(k)]
        self.len = k
        self.head = None
        self.tail = None
        self.debug = False
        if self.debug:
            print('-'*35)
            print(self.arr)

    def enQueue(self, value: int) -> bool:
        """Append at the rear, wrapping the tail index; return False when full."""
        if self.debug:
            print("Current Capacity: ",self.currCapacity())
            print("Head: {}, Tail: {}".format(self.head, self.tail))
        if self.isEmpty():
            # First element: initialize both indices to slot 0.
            self.head = 0
            self.tail = 0
            self.arr[self.tail] = value
        elif self.isFull():
            if self.debug: print("enQueue: FULL\n")
            return False
        else:
            # Advance the tail, wrapping past the end of the backing list.
            self.tail+=1
            if not self.tail < self.len:
                self.tail=0
            self.arr[self.tail] = value
        if self.debug:
            print("Head: {}, Tail: {}".format(self.head, self.tail))
            print("enQueue: ",self.arr)
            print()
        return True

    def deQueue(self) -> bool:
        """Remove the front element, wrapping the head index; return False when empty."""
        if self.debug:
            print("Current Capacity: ",self.currCapacity())
            print("Head: {}, Tail: {}".format(self.head, self.tail))
        if self.isEmpty():
            self.head = None
            self.tail = None
            if self.debug: print("deQueue: EMPTY\n")
            return False
        elif self.head==self.tail:
            # Removing the last element resets the queue to the empty state.
            self.arr[self.head] = None
            self.head = self.tail = None
        else:
            self.arr[self.head] = None
            self.head +=1
            if not self.head < self.len:
                self.head = 0
        if self.debug:
            print("Head: {}, Tail: {}".format(self.head, self.tail))
            print("deQueue: ",self.arr)
            print()
        return True

    def Front(self) -> int:
        """Return the front value, or -1 when the queue is empty."""
        if not self.isEmpty() and self.head!=None:
            return self.arr[self.head]
        else:
            return -1

    def Rear(self) -> int:
        """Return the rear value, or -1 when the queue is empty."""
        if not self.isEmpty() and self.tail!=None:
            return self.arr[self.tail]
        else:
            return -1

    def isEmpty(self) -> bool:
        return self.currCapacity()==0

    def isFull(self) -> bool:
        return self.currCapacity() == self.len

    def currCapacity(self) -> int:
        """Return the number of stored elements, accounting for wrap-around."""
        if self.tail==self.head==None:
            return 0
        elif self.tail>=self.head:
            return (self.tail - self.head + 1)
        else:
            # Tail has wrapped: count the two contiguous segments.
            return (self.len - self.head) + (self.tail + 1)
## Better Leetcode Implementation
class MyCircularQueue:
    """Fixed-capacity circular (ring-buffer) queue backed by a flat list.

    ``headIndex`` marks the front slot and ``count`` the number of stored
    items; the rear slot is derived as (headIndex + count - 1) mod capacity,
    so no separate tail pointer is needed.
    """

    def __init__(self, k: int):
        self.queue = [0]*k
        self.headIndex = 0
        self.count = 0
        self.capacity = k

    def enQueue(self, value: int) -> bool:
        """Append ``value`` at the rear; return False when already full."""
        if self.isFull():
            return False
        rear = (self.headIndex + self.count) % self.capacity
        self.queue[rear] = value
        self.count += 1
        return True

    def deQueue(self) -> bool:
        """Drop the front element; return False when already empty."""
        if self.isEmpty():
            return False
        self.headIndex = (self.headIndex + 1) % self.capacity
        self.count -= 1
        return True

    def Front(self) -> int:
        """Return the front value, or -1 when empty."""
        return self.queue[self.headIndex] if self.count else -1

    def Rear(self) -> int:
        """Return the rear value, or -1 when empty."""
        if not self.count:
            return -1
        return self.queue[(self.headIndex + self.count - 1) % self.capacity]

    def isEmpty(self) -> bool:
        return self.count == 0

    def isFull(self) -> bool:
        return self.count == self.capacity
# Your MyCircularQueue object will be instantiated and called as such:
# obj = MyCircularQueue(k)
# param_1 = obj.enQueue(value)
# param_2 = obj.deQueue()
# param_3 = obj.Front()
# param_4 = obj.Rear()
# param_5 = obj.isEmpty()
# param_6 = obj.isFull()
|
[
"rohanr27@gmail.com"
] |
rohanr27@gmail.com
|
f23f1f92ebec803ab3cfd711f847c4a7e60f5e61
|
cf652cb90f9d6b22b5943e7d025af631214a904d
|
/contrib/build-wine/deterministic.spec
|
01dff8d60d15b6e08d3e971bbaa7ad2890eb9531
|
[
"MIT"
] |
permissive
|
ddude1/TestLite
|
02919c68013d2ede9195d618d94260b842a5e292
|
3f3c00e4ef03dd9b23b99b02f9a8895da8d65aef
|
refs/heads/master
| 2022-12-11T12:22:25.029101
| 2018-06-13T14:11:51
| 2018-06-13T14:11:51
| 136,489,568
| 0
| 0
|
MIT
| 2022-09-23T21:47:03
| 2018-06-07T14:31:31
|
Python
|
UTF-8
|
Python
| false
| false
| 3,262
|
spec
|
# -*- mode: python -*-
import sys
# Pull the executable name from the PyInstaller command line (--name <value>).
for i, x in enumerate(sys.argv):
    if x == '--name':
        cmdline_name = sys.argv[i+1]
        break
else:
    # Fail fast when the spec is built without --name.  SystemExit is a
    # BaseException subclass, so existing broad handlers still catch it,
    # but it no longer masquerades as an uncatchable internal error.
    raise SystemExit('no name')
# Modules PyInstaller cannot discover through static analysis: plugin
# packages loaded dynamically, GUI pieces, and python-trezor's runtime
# dependency on `mnemonic`.
hiddenimports = [
    'lib',
    'lib.base_wizard',
    'lib.plot',
    'lib.qrscanner',
    'lib.websockets',
    'gui.qt',

    'mnemonic',  # required by python-trezor

    'plugins',
    'plugins.hw_wallet.qt',
    'plugins.audio_modem.qt',
    'plugins.cosigner_pool.qt',
    'plugins.digitalbitbox.qt',
    'plugins.email_requests.qt',
    'plugins.keepkey.qt',
    'plugins.labels.qt',
    'plugins.trezor.qt',
    'plugins.ledger.qt',
    'plugins.virtualkeyboard.qt',
]

# Data files bundled with the executable: (source path, destination folder).
datas = [
    ('lib/servers.json', 'electrum_xgox'),
    ('lib/servers_testnet.json', 'electrum_xgox'),
    ('lib/currencies.json', 'electrum_xgox'),
    ('lib/locale', 'electrum_xgox/locale'),
    ('lib/wordlist', 'electrum_xgox/wordlist'),
]
# Keep tkinter/tcl out of the bundle:
# https://github.com/pyinstaller/pyinstaller/wiki/Recipe-remove-tkinter-tcl
sys.modules['FixTk'] = None
excludes = ['FixTk', 'tcl', 'tk', '_tkinter', 'tkinter', 'Tkinter']

# NOTE(review): Analysis/PYZ/EXE/COLLECT (and apparently `os`) come from the
# namespace PyInstaller injects when executing this spec file — confirm
# `os` availability if moving this code.
a = Analysis(['electrum-xgox'],
             pathex=['plugins'],
             hiddenimports=hiddenimports,
             datas=datas,
             excludes=excludes,
             runtime_hooks=['pyi_runtimehook.py'])

# Drop the bundled pyconfig data file.
# http://stackoverflow.com/questions/19055089/
for d in a.datas:
    if 'pyconfig' in d[0]:
        # Removing while iterating is safe here only because of the break.
        a.datas.remove(d)
        break

# Add TOC to electrum_xgox, electrum_xgox_gui, electrum_xgox_plugins:
# mirror the lib/gui/plugins modules under the electrum_xgox_* package names.
for p in sorted(a.pure):
    if p[0].startswith('lib') and p[2] == 'PYMODULE':
        a.pure += [('electrum_xgox%s' % p[0][3:] , p[1], p[2])]
    if p[0].startswith('gui') and p[2] == 'PYMODULE':
        a.pure += [('electrum_xgox_gui%s' % p[0][3:] , p[1], p[2])]
    if p[0].startswith('plugins') and p[2] == 'PYMODULE':
        a.pure += [('electrum_xgox_plugins%s' % p[0][7:] , p[1], p[2])]

pyz = PYZ(a.pure)

# Windowed (console=False) main executable, named from --name.
exe = EXE(pyz,
          a.scripts,
          exclude_binaries=True,
          debug=False,
          strip=False,
          upx=False,
          console=False,
          icon='icons/electrum-xgox.ico',
          name=os.path.join('build\\pyi.win32\\electrum', cmdline_name))

# exe with console output
conexe = EXE(pyz,
             a.scripts,
             exclude_binaries=True,
             debug=False,
             strip=False,
             upx=False,
             console=True,
             icon='icons/electrum-xgox.ico',
             name=os.path.join('build\\pyi.win32\\electrum',
                               'console-%s' % cmdline_name))

# trezorctl separate executable
tctl_a = Analysis(['C:/Python34/Scripts/trezorctl'],
                  hiddenimports=['pkgutil'],
                  excludes=excludes,
                  runtime_hooks=['pyi_tctl_runtimehook.py'])

tctl_pyz = PYZ(tctl_a.pure)

tctl_exe = EXE(tctl_pyz,
               tctl_a.scripts,
               exclude_binaries=True,
               debug=False,
               strip=False,
               upx=False,
               console=True,
               name=os.path.join('build\\pyi.win32\\electrum', 'trezorctl.exe'))

# Collect all three executables plus binaries/data into dist/electrum-xgox.
coll = COLLECT(exe, conexe, tctl_exe,
               a.binaries,
               a.datas,
               strip=False,
               upx=False,
               name=os.path.join('dist', 'electrum-xgox'))
|
[
"32743542+ddude1@users.noreply.github.com"
] |
32743542+ddude1@users.noreply.github.com
|
67478ebc3d95f581efd04b7e8f9785e8c7356d45
|
7afcf3cf0f55ecc255aabdda3b90c44528f53b50
|
/Crawler/ai/ai/ai/items.py
|
ed1681a61ab48cf04819544016e5a09308405564
|
[] |
no_license
|
entscheidsuche/scraper
|
368c6ac8fd14e15116c26f936f32d2ed0acac2ae
|
b9fafd3f1c2600a78471d4e4c466250ab11a8f33
|
refs/heads/master
| 2023-04-05T22:09:20.270314
| 2021-04-18T19:29:24
| 2021-04-18T19:29:24
| 264,894,732
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class AiItem(scrapy.Item):
    """Scraped item carrying a document's name and link plus its Kanton
    (canton) and Sprache (language) fields."""

    name = scrapy.Field()
    link = scrapy.Field()
    Kanton = scrapy.Field()
    Sprache = scrapy.Field()
|
[
"joern@erbguth.net"
] |
joern@erbguth.net
|
354f84a91bfa9fb50948ba4a5b0b9fca0a5a2e8d
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_95/784.py
|
c996bf19a378d7bdffef6baad4ca8a6bd272cf74
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
# Python 2 script: build a character substitution table from paired
# plaintext/ciphertext sample lines in 'cipher', then decode each line of
# 'input' and print it in "Case #N: ..." form.
f = file('cipher')
string_mapping = []
char_mapping = {}
x = int(f.readline())
# First x lines are the source strings, the next x lines their translations.
for i in range(x):
    string_mapping.append([f.readline()])
for i in range(x):
    string_mapping[i].append(f.readline())
# Derive the per-character mapping from the aligned string pairs.
for i in range(x):
    for j in range(len(string_mapping[i][0])):
        x1 = string_mapping[i][0][j]
        x2 = string_mapping[i][1][j]
        char_mapping[x1] = x2
# 'z'/'q' are hard-coded — presumably absent from the samples; TODO confirm.
char_mapping['z'] = 'q'
char_mapping['q'] = 'z'
#print sorted(char_mapping.keys())

f = file('input')
x = int(f.readline())
for i in range(x):
    s = f.readline()
    # NOTE(review): the comprehension variable x shadows the outer line count,
    # which is harmless here only because x is not reused afterwards.
    s_list = [ char_mapping[x] for x in s]
    s_out = "".join(s_list)
    # Trailing comma suppresses print's newline (s_out keeps its own '\n').
    print "Case #%s: %s" % (str(i + 1), s_out),
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
55e705dfe0e11bc045a55ecd85bc6dc9f54a1d2b
|
455cab19b8c1b7fa43d97e93137834431ab479b1
|
/hw0/1a.py
|
d88e062b989e94269b9eb3674a8829010c0dbae4
|
[] |
no_license
|
janeon/hsieh-stat339
|
f5c731503d17798be4bd18e33601e9060c7d5b88
|
8927e45da48b2a867e10cd94ac138b76c0767843
|
refs/heads/master
| 2021-01-03T04:30:39.101070
| 2020-02-12T04:04:58
| 2020-02-12T04:04:58
| 239,923,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
import numpy as np
import matplotlib.pyplot as plt

# Demo: step plot of y = floor(x) for x = 0..9.
# (floor is a no-op on integer arange output — presumably kept to
# illustrate the step shape; TODO confirm intent.)
x = np.arange(10)
y = np.floor(x)
plt.xlabel('x axis')
plt.ylabel('y axis')
# plt.step returns a list of Line2D artists; this prints that list's type.
print(type(plt.step(x, y)))
plt.show()
|
[
"jhsieh@oberlin.edu"
] |
jhsieh@oberlin.edu
|
fcd8f4b935de79b3970106ec58f17b968edefb80
|
a46897f8610749db614e53208c39b2f0308011b3
|
/usage_report.py
|
391c50d21343498a8efc1599eae83b780b32f90b
|
[] |
no_license
|
seidelj/uprogram-math
|
4bd269c2a2fe4218529f4369555cbd51e04b4363
|
4a73ec6db1de48624046abe321d483f094e6ee1c
|
refs/heads/master
| 2021-01-19T03:25:19.077484
| 2016-10-15T01:15:47
| 2016-10-15T01:15:47
| 52,285,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
import website.wsgi  # noqa: F401 -- imported for its side effect of setting up Django
from request.models import Request
from mathtutor.models import Student
import csv

# Export every Request row that has a user_id to access.csv:
# a header of all model field names, then one CSV row per record.
# NOTE(review): the original opened the file without ever closing it,
# risking truncated output; `with` guarantees flush/close.
columns = [f.name for f in Request._meta.get_fields()]
with open("access.csv", 'w') as csvFile:
    writer = csv.writer(csvFile, csv.excel)
    writer.writerow(columns)
    for r in Request.objects.all():
        # Skip anonymous requests (no associated user).
        if r.user_id:
            writer.writerow([getattr(r, c) for c in columns])
|
[
"seidel.jp@gmail.com"
] |
seidel.jp@gmail.com
|
d5e9ddbf4cd946d176d9f9410b47c88c0bc6f578
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/feature/vt/gui/task/ForceApplyMarkupItemTask.pyi
|
a0d1f4c08ef41ab1d8ca046c6f84079e18d142a4
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
pyi
|
import ghidra.feature.vt.gui.task
import ghidra.util.task
import java.lang
class ForceApplyMarkupItemTask(ghidra.feature.vt.gui.task.ApplyMarkupItemTask):
    """Auto-generated type stub for Ghidra's ForceApplyMarkupItemTask.

    Method bodies are intentionally elided (``...``); the Python-2-style
    names ``unicode``/``long`` come from the stub generator and mirror the
    underlying Java ``String``/``long`` types. Do not edit by hand.
    """

    def __init__(self, __a0: ghidra.feature.vt.api.main.VTSession, __a1: java.util.Collection, __a2: ghidra.framework.options.ToolOptions): ...

    def addTaskListener(self, __a0: ghidra.util.task.TaskListener) -> None: ...

    def canCancel(self) -> bool: ...

    def cancel(self) -> None: ...

    def equals(self, __a0: object) -> bool: ...

    def getClass(self) -> java.lang.Class: ...

    def getErrorDetails(self) -> unicode: ...

    def getStatusTextAlignment(self) -> int: ...

    def getTaskTitle(self) -> unicode: ...

    def hasErrors(self) -> bool: ...

    def hasProgress(self) -> bool: ...

    def hashCode(self) -> int: ...

    def isModal(self) -> bool: ...

    def logErrors(self) -> None: ...

    def monitoredRun(self, __a0: ghidra.util.task.TaskMonitor) -> None: ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    def run(self, __a0: ghidra.util.task.TaskMonitor) -> None: ...

    def setHasProgress(self, __a0: bool) -> None: ...

    def showErrors(self) -> None: ...

    def toString(self) -> unicode: ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...

    def wasCancelled(self) -> bool: ...

    def wasSuccessfull(self) -> bool: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
a3e82a1f2efc481e4336c6020234f9bf90f84a93
|
f11600b9a256bf6a2b584d127faddc27a0f0b474
|
/normal/809.py
|
4bae6be20deb773c0675bd27ac9141f4cb83b3b5
|
[] |
no_license
|
longhao54/leetcode
|
9c1f0ce4ca505ec33640dd9b334bae906acd2db5
|
d156c6a13c89727f80ed6244cae40574395ecf34
|
refs/heads/master
| 2022-10-24T07:40:47.242861
| 2022-10-20T08:50:52
| 2022-10-20T08:50:52
| 196,952,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
class Solution(object):
def expressiveWords(self, S, words):
def RLE(S):
return zip(*[(k, len(list(grp)))
for k, grp in itertools.groupby(S)])
R, count = RLE(S)
ans = 0
for word in words:
R2, count2 = RLE(word)
if R2 != R: continue
ans += all(c1 >= max(c2, 3) or c1 == c2
for c1, c2 in zip(count, count2))
return ans
|
[
"jinlha@jiedaibao.com"
] |
jinlha@jiedaibao.com
|
566430d6408c4043b198fd0bf8b7b62bdb08e899
|
665add8c434df0445294931aac7098e8a0fa605b
|
/ver1/ch10/echoClient.py
|
2d9a1570d99f6a3819cc5cd98efcebde98e3e02a
|
[] |
no_license
|
swkim01/RaspberryPiWithIOT
|
f43cef567ca48f2ce9deec0cba87fa801dcbcbe2
|
d4b5c9aeb09490429a551f357d3c83ab04deed82
|
refs/heads/master
| 2023-04-14T20:04:33.924243
| 2023-04-12T05:15:32
| 2023-04-12T05:15:32
| 48,477,439
| 4
| 14
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
# TCP echo client: sends the command-line arguments as one line to an
# Arduino echo server on port 7 and prints what comes back.
# NOTE(review): Python 2 code — print statements and str-typed sendall.
import socket
import sys

HOST, PORT = "<arduino ip>", 7  # placeholder: replace with the board's IP
data = " ".join(sys.argv[1:])

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    sock.connect((HOST, PORT))
    sock.sendall(data + '\n')
    # Single recv: assumes the echo fits in one <=1024-byte read — TODO confirm.
    received = sock.recv(1024)
finally:
    # Always release the socket, even if connect/send fails.
    sock.close()

print "Sent: {}".format(data)
print "Received: {}".format(received)
|
[
"swkim01@gmail.com"
] |
swkim01@gmail.com
|
c4dae2d58da8f29eacf1744564d8c1d490614f4b
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_1484496_0/Python/zifmia/equalsums.py
|
070fef3e61ff34fceaa4a7e4767191dd421f5cee
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
# google code jam 2012 round 1b
# equal sums
import itertools
def numlist(s, n, v):
    """Render the subset of the first *n* elements of *s* selected by
    bitmask *v* as a space-separated string terminated by a newline.

    Bit i of *v* set means s[i] is included. An empty selection yields "\n".
    """
    picked = [str(s[i]) for i in range(n) if v & (1 << i)]
    return " ".join(picked) + "\n"
# Driver: for each test case, find two distinct subsets (as bitmasks) with
# equal sums, by enumerating subset sums until a collision appears.
# NOTE(review): Python 2 code — xrange and print statements.
f = open("c:\\users\\James\\Downloads\\C-small-attempt0.in", "r")
#f = open("equal.in","r")
fout= open("equal.out", "w")
t = int(f.readline())  # number of test cases
for t1 in xrange(t):
    # m chars already typed out of n total
    # First number on the line is n; the rest are the n set elements.
    s = map(int, f.readline().split() )
    n = s[0]
    s = s[1:]
    sums = {}  # maps subset sum -> first bitmask seen with that sum
    fout.write("Case #%d:\n" % (t1+1))
    found = False
    # Enumerate all 2**n subsets; by pigeonhole a collision exists for the
    # small input, so the loop breaks early in practice.
    for v in xrange(2**n):
        total = 0
        for i in range(n):
            if (1<<i & v):
                total += s[i]
        if total in sums:
            found = True
            break
        else:
            sums[total] = v
    if found:
        # v and sums[total] are two different subsets with the same sum.
        print numlist(s, n, v),
        fout.write(numlist(s, n, v))
        print numlist(s, n, sums[total]),
        fout.write(numlist(s, n, sums[total]))
    else:
        fout.write("Impossible\n")
fout.close()
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
290788da9c02368ec2297cca7a6283e7fc5b95a1
|
9f6e256b96f20fb4f9e13c42066b49d43d22f47f
|
/docs/extensions/details.py
|
231d1c55eabc4bda563a504930f732a7da5cf3f3
|
[
"MIT"
] |
permissive
|
bijij/Donphan
|
4daa75fee451e6b911e4e24f2c14c27b56102ee3
|
c583332d290473c19a8cdd9f7c14cf17a7d17de1
|
refs/heads/master
| 2023-08-16T21:32:35.382536
| 2023-08-13T11:52:15
| 2023-08-13T11:52:15
| 188,970,855
| 19
| 3
|
NOASSERTION
| 2022-12-22T15:23:47
| 2019-05-28T06:49:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,708
|
py
|
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils.parsers.rst.roles import set_classes
from docutils import nodes
class details(nodes.General, nodes.Element):
    """Docutils node rendered as an HTML ``<details>`` element."""
    pass
class summary(nodes.General, nodes.Element):
    """Docutils node rendered as an HTML ``<summary>`` element."""
    pass
def visit_details_node(self, node):
    """HTML writer hook: emit the opening ``<details>`` tag with its class."""
    self.body.append(self.starttag(node, "details", CLASS=node.attributes.get("class", "")))
def visit_summary_node(self, node):
    """HTML writer hook: emit the opening ``<summary>`` tag plus its text.

    The node's rawsource (the directive's argument) is written directly as
    the summary label.
    """
    self.body.append(self.starttag(node, "summary", CLASS=node.attributes.get("summary-class", "")))
    self.body.append(node.rawsource)
def depart_details_node(self, node):
    """HTML writer hook: close the ``<details>`` element."""
    self.body.append("</details>\n")
def depart_summary_node(self, node):
    """HTML writer hook: close the ``<summary>`` element."""
    self.body.append("</summary>")
class DetailsDirective(Directive):
    """reST ``details`` directive producing a collapsible HTML block.

    Optional argument: the summary (clickable) text. Options ``class`` and
    ``summary-class`` set CSS classes on the two generated elements. The
    directive body becomes the collapsible content.
    """
    final_argument_whitespace = True  # allow spaces in the summary argument
    optional_arguments = 1

    option_spec = {
        "class": directives.class_option,
        "summary-class": directives.class_option,
    }

    has_content = True

    def run(self):
        # Normalize the "class" option into node classes (docutils helper).
        set_classes(self.options)
        self.assert_has_content()

        text = "\n".join(self.content)
        node = details(text, **self.options)

        if self.arguments:
            # First (and only) argument is the summary label.
            summary_node = summary(self.arguments[0], **self.options)
            summary_node.source, summary_node.line = self.state_machine.get_source_and_line(self.lineno)
            node += summary_node

        # Parse the directive body as nested reST inside the details node.
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
def setup(app):
    """Sphinx entry point: register the details/summary nodes and directive.

    Returns the extension metadata dict so Sphinx knows the extension is
    safe for parallel reading/writing (per the Sphinx extension API, setup()
    should return such metadata; returning it is backward-compatible).
    """
    app.add_node(details, html=(visit_details_node, depart_details_node))
    app.add_node(summary, html=(visit_summary_node, depart_summary_node))
    app.add_directive("details", DetailsDirective)
    return {"parallel_read_safe": True, "parallel_write_safe": True}
|
[
"josh.ja.butt@gmail.com"
] |
josh.ja.butt@gmail.com
|
17dd1dfa08c75cbef333ad95c13333ffad288d3b
|
325bee18d3a8b5de183118d02c480e562f6acba8
|
/HongKong/HCK_l/build/lib/HCK_l/settings.py
|
5039699bcf1b52e9bf5fae7767ce72cd5c06eca0
|
[] |
no_license
|
waynecanfly/spiderItem
|
fc07af6921493fcfc21437c464c6433d247abad3
|
1960efaad0d995e83e8cf85e58e1db029e49fa56
|
refs/heads/master
| 2022-11-14T16:35:42.855901
| 2019-10-25T03:43:57
| 2019-10-25T03:43:57
| 193,424,274
| 4
| 0
| null | 2022-11-04T19:16:15
| 2019-06-24T03:00:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,260
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for ChinaSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'HCK_l'

SPIDER_MODULES = ['HCK_l.spiders']
NEWSPIDER_MODULE = 'HCK_l.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ChinaSpider (+http://www.yourdomain.com)'

# Obey robots.txt rules
#ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers (desktop Chrome UA so the target
# sites serve their normal HTML):
DEFAULT_REQUEST_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36",
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    # "Content-Type": "application/json",
    # "X-Requested-With": "XMLHttpRequest"
    # 'Accept-Language': 'en',
}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    # 'ChinaSpider.middlewares.ChinaspiderSpiderMiddleware': 543,
    # 'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    # 'ChinaSpider.middlewares.MyCustomDownloaderMiddleware': 543,
    'HCK_l.MyMiddleware.SeleniumChrome': 543,
    # 'scrapy_splash.SplashCookiesMiddleware': 723,
    # 'scrapy_splash.SplashMiddleware': 725,
    # 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # 'HCK_l.pipelines.ChinaCompanyPipeline': 300,
    'HCK_l.pipelines.ChinaDetailPipeline': 301,
    # 'HCK_l.pipelines.ChinaDownloadPipeline': 305,
    'HCK_l.pipelines.ChinaChangeNamePipeline': 303,
    'HCK_l.MyfilesPipeline.MyfilesPipeline': 304,
    'HCK_l.pipelines.SZSENewAddPipeline': 306
    # 'HCK_l.chinaStartPipeline.endPipeline': 304
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# Item field holding the file URLs for Scrapy's FilesPipeline.
# NOTE(review): fixed an accidental trailing comma that made this a 1-tuple
# ("doc_source_url",) instead of the string field name Scrapy expects.
FILES_URLS_FIELD = "doc_source_url"
FILES_STORE = "/data/OPDCMS/HCK/pdf_l"  # destination directory for downloaded PDFs

"""scrapy-splash"""
# SPLASH_URL = 'http://192.168.99.100:8050'
# DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
# HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
#docker run -p 8050:8050 scrapinghub/splash
|
[
"1370153124@qq.com"
] |
1370153124@qq.com
|
976b6f40f74fe705396ec1586f10765ac82dc20a
|
ec9590c45c34d3314869cb65712143cbeb70372d
|
/samples/use_management_resource.py
|
fc631a1b88bbdf7009690ee8ee7897acf69bed66
|
[
"MIT"
] |
permissive
|
areed1192/sigma-azure-management
|
522cee4d858ad7dfbcd8277ddfa15c89ccc0b74f
|
65edeac081e9e2b1ba85d2aa262e73e5f89ae5ca
|
refs/heads/master
| 2022-12-18T07:54:27.659804
| 2020-09-20T23:22:42
| 2020-09-20T23:22:42
| 288,521,057
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,548
|
py
|
from pprint import pprint
from configparser import ConfigParser

from azure.mgmt.resource import ResourceManagementClient
from azure.common.credentials import ServicePrincipalCredentials

# These are only imported for Type Hinting and Intellisense.
from azure.mgmt.resource.resources.models import ResourceGroup
from azure.mgmt.resource.resources.models import GenericResourceExpanded

# Initialize the Parser.
config = ConfigParser()

# Read the config file, falling back to the alternate folder name.
# NOTE(review): the original wrapped this in try/except with a bare
# `except:` — a no-op, since ConfigParser.read() does not raise for a
# missing file; it returns the list of files it successfully parsed.
# Checking that return value makes the fallback actually work.
if not config.read('config/config.ini'):
    config.read('configs/config.ini')

# Grab the Azure Credentials needed.
tenant_id = config.get('azure_credentials', 'azure_tenant_id')
client_id = config.get('azure_credentials', 'azure_client_id')
client_secret = config.get('azure_credentials', 'azure_client_secret')
subscription_id = config.get('azure_credentials', 'azure_subscription_id')

# Define the Credentials.
credential = ServicePrincipalCredentials(
    tenant=tenant_id,
    client_id=client_id,
    secret=client_secret
)

# Pass through the credential.
resource_management_client = ResourceManagementClient(
    credentials=credential,
    subscription_id=subscription_id
)

# Loop through each resource group that falls under the subscription.
for resource_group in resource_management_client.resource_groups.list():

    # Redefine this for Type Hinting.
    resource_group: ResourceGroup = resource_group

    print('')
    print(resource_group.id)
    print(resource_group.name)
    print(resource_group.managed_by)
    print('')
    pprint(resource_group.as_dict())
|
[
"alexreed1192@gmail.com"
] |
alexreed1192@gmail.com
|
3ee38e56dbc5fd7ee935ef1c4b6bc3eb7c3e8d41
|
9296896cb6cdc68a27052a36a8fbed48aaf7fac9
|
/aanim/state.py
|
e178a422403d16b8489fc71fbf9a50269a72a687
|
[] |
no_license
|
francolq/algo2
|
28da0cdb35a8d6c32121dc6a4b9747eb4c4a7171
|
ea4940b7d42cf9ae29a57bcc238e475b9427621c
|
refs/heads/master
| 2022-07-14T03:55:35.473395
| 2020-05-13T01:02:41
| 2020-05-13T01:02:41
| 263,446,665
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,992
|
py
|
from manimlib.imports import *
from aanim.code import code_text
class State(VGroup):
    """Manim group visualizing named program variables and their values.

    Each variable name is rendered with ``code_text`` and stacked
    vertically; its value Mobject (if any) is placed directly below it.
    ``var_dict`` maps name -> (name Mobject, value Mobject).
    """

    def __init__(self, vars_values):
        # vars_values: iterable of (name, value-Mobject) pairs; a None value
        # is currently rejected (assert False below).
        VGroup.__init__(self)
        self.var_dict = var_dict = {}
        for i, (name, value) in enumerate(vars_values):
            # values to the right
            #var = algorithms.code(varname.ljust(maxlen) + ':')
            #var.shift(DOWN * i)
            #value.next_to(var, RIGHT)
            # values below
            var = code_text(name)
            var.shift(DOWN * 2.2 * i)  # 2.2 units per row: name + value + gap
            var.align_to(ORIGIN, LEFT)
            self.add(var)
            if value is not None:
                value.next_to(var, DOWN)
                value.align_to(ORIGIN, LEFT)
                self.add(value)
            else:
                assert False  # value-less variables not supported yet
            var_dict[name] = (var, value)

    def var(self, name):
        # Return the name Mobject for `name`.
        return self.var_dict[name][0]

    def value(self, name):
        # Return the value Mobject for `name`.
        return self.var_dict[name][1]

    def set_value(self, name, value):
        # Swap the displayed value Mobject for `name` in place.
        # NOTE(review): `value` is expected to be pre-positioned by the
        # caller; the commented-out alignment code was abandoned.
        var, old_value = self.var_dict[name]
        if old_value is not None:
            #value.replace(old_value, dim_to_match=1)
            self.remove(old_value)
        else:
            assert False  # should be unreachable: __init__ forbids None values
        #value.align_to(var, LEFT)
        #value.next_to(var, DOWN)
        self.add(value)
        self.var_dict[name] = (var, value)

    def del_var(self, name):
        # Remove both the name and value Mobjects from the scene group.
        # (The var_dict entry is intentionally kept.)
        var, value = self.var_dict[name]
        self.remove(var, value)

    def del_value(self, name):
        # Remove only the value Mobject, leaving the name visible.
        _, value = self.var_dict[name]
        self.remove(value)
class Value(VGroup):
    """Manim group wrapping a single ``code_text`` value Mobject."""

    def __init__(self, value):
        # value: source string to render with code_text.
        VGroup.__init__(self)
        self.value = code_text(value)
        self.add(self.value)

    def play_change(self, main, value):
        """Animate replacing the displayed text with `value`.

        `main` is the scene (provides .play). The new text is sized and
        left-aligned to match the old before the Transform.
        NOTE(review): self.value still references the transformed old
        Mobject afterwards, as Transform mutates its first argument.
        """
        old_value = self.value
        value2 = code_text(value)
        value2.replace(old_value)
        value2.set_height(old_value.get_height())
        value2.align_to(old_value, LEFT)
        main.play(Transform(old_value, value2))
|
[
"francolq@famaf.unc.edu.ar"
] |
francolq@famaf.unc.edu.ar
|
8b3538ad3cfd11dfb423b95da253abaf6ea09ee2
|
e7272e2e1e2da1e60dee1e4dad7154d339a83a9d
|
/migrations/versions/31888642621b_.py
|
e731df004f6c06b046a4ae8901b23a7f1051b43f
|
[] |
no_license
|
Shumpei-Kikuta/rental_app
|
2cdfbb349293b798697c11b3a71b4d199272760d
|
e2c1cf4cfe467a8dd3b9aa692fb13ccdc15cac83
|
refs/heads/master
| 2020-03-22T22:57:11.677838
| 2018-09-19T13:13:16
| 2018-09-19T13:13:16
| 140,780,864
| 0
| 0
| null | 2018-07-26T03:45:04
| 2018-07-13T01:22:07
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
"""empty message
Revision ID: 31888642621b
Revises: e1adc6885ee7
Create Date: 2018-07-01 16:59:23.157768
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '31888642621b'        # this migration's id
down_revision = 'e1adc6885ee7'   # migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Create the chat_table and deal_table tables (Alembic-generated)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('chat_table',
    sa.Column('chat_id', sa.Integer(), nullable=False),
    sa.Column('deal_id', sa.Integer(), nullable=True),
    sa.Column('speaker', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('chat_id')
    )
    op.create_table('deal_table',
    sa.Column('deal_id', sa.Integer(), nullable=False),
    sa.Column('lender_id', sa.Integer(), nullable=True),
    sa.Column('borrower_id', sa.Integer(), nullable=True),
    sa.Column('price', sa.Integer(), nullable=True),
    sa.Column('phase', sa.String(length=10), nullable=True),
    sa.PrimaryKeyConstraint('deal_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop deal_table and chat_table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('deal_table')
    op.drop_table('chat_table')
    # ### end Alembic commands ###
|
[
"shunpei-kikuta775@g.ecc.u-tokyo.ac.jp"
] |
shunpei-kikuta775@g.ecc.u-tokyo.ac.jp
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.