blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8a4a6045331fbe880e465f92ec60997cf8936fb5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_cabbie.py | 6e6ef5a74bd29c23a2f80dd665c5137381783643 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py |
# class header
class _CABBIE():
    """Word-base entry for the noun "cabbie" (a taxi driver).

    Instances expose the word's metadata as plain attributes; ``run`` is the
    generic lookup hook shared by all generated word classes.
    """

    def __init__(self):
        self.name = "CABBIE"
        self.definitions = [u'a driver of a taxi']
        self.parents = []
        # NOTE: historical misspelling of "children" kept for compatibility
        # with the rest of the generated word base.
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'

    def run(self, obj1=None, obj2=None):
        """Return the JSON payload; *obj1*/*obj2* are accepted but unused.

        Fix: the defaults were shared mutable lists (``obj1=[]``), a classic
        Python pitfall — one list object is reused across every call. ``None``
        sentinels preserve the call signature without the shared state.
        """
        if obj1 is None:
            obj1 = []
        if obj2 is None:
            obj2 = []
        return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
9be3e219222181adcea02d90ac202c71fab57999 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4051/867004051.py | 975e246781a36ab16a69e9109f3e1c5a1418a908 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 3,180 | py | from bots.botsconfig import *
from records004051 import recorddefs
# ISA envelope settings consumed by the bots EDI engine when sending this
# transaction set.
syntax = {
    'version' : '00403', #version of ISA to send
    'functionalgroup' : 'PT',
    }
# Segment tree for X12 transaction set 867 (Product Transfer and Resale
# Report), version 004051: each node is a segment ID with its min/max
# occurrence counts, and LEVEL nests the segments of that loop.
structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
    {ID: 'BPT', MIN: 1, MAX: 1},
    {ID: 'CUR', MIN: 0, MAX: 1},
    {ID: 'DTM', MIN: 0, MAX: 10},
    {ID: 'REF', MIN: 0, MAX: 12},
    {ID: 'PER', MIN: 0, MAX: 3},
    {ID: 'MEA', MIN: 0, MAX: 20},
    {ID: 'PSA', MIN: 0, MAX: 10},
    {ID: 'N1', MIN: 0, MAX: 5, LEVEL: [
        {ID: 'N2', MIN: 0, MAX: 2},
        {ID: 'N3', MIN: 0, MAX: 2},
        {ID: 'N4', MIN: 0, MAX: 1},
        {ID: 'REF', MIN: 0, MAX: 12},
        {ID: 'PER', MIN: 0, MAX: 99999, LEVEL: [
            {ID: 'REF', MIN: 0, MAX: 99999},
        ]},
    ]},
    {ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
        {ID: 'LQ', MIN: 1, MAX: 100},
        {ID: 'LCD', MIN: 0, MAX: 2},
    ]},
    {ID: 'PTD', MIN: 1, MAX: 99999, LEVEL: [
        {ID: 'DTM', MIN: 0, MAX: 10},
        {ID: 'REF', MIN: 0, MAX: 20},
        {ID: 'PRF', MIN: 0, MAX: 1},
        {ID: 'PER', MIN: 0, MAX: 3},
        {ID: 'MAN', MIN: 0, MAX: 1},
        {ID: 'LCD', MIN: 0, MAX: 2},
        {ID: 'LQ', MIN: 0, MAX: 99999},
        {ID: 'MEA', MIN: 0, MAX: 99999},
        {ID: 'N1', MIN: 0, MAX: 5, LEVEL: [
            {ID: 'N2', MIN: 0, MAX: 2},
            {ID: 'N3', MIN: 0, MAX: 2},
            {ID: 'N4', MIN: 0, MAX: 1},
            {ID: 'REF', MIN: 0, MAX: 20},
            {ID: 'PER', MIN: 0, MAX: 3},
            {ID: 'SII', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'N9', MIN: 0, MAX: 1},
            ]},
        ]},
        {ID: 'QTY', MIN: 0, MAX: 99999, LEVEL: [
            {ID: 'LIN', MIN: 0, MAX: 1},
            {ID: 'PO3', MIN: 0, MAX: 25},
            {ID: 'PO4', MIN: 0, MAX: 1},
            {ID: 'UIT', MIN: 0, MAX: 12},
            {ID: 'AMT', MIN: 0, MAX: 12},
            {ID: 'ITA', MIN: 0, MAX: 10},
            {ID: 'PID', MIN: 0, MAX: 200},
            {ID: 'MEA', MIN: 0, MAX: 40},
            {ID: 'PWK', MIN: 0, MAX: 25},
            {ID: 'PKG', MIN: 0, MAX: 25},
            {ID: 'REF', MIN: 0, MAX: 99999},
            {ID: 'PER', MIN: 0, MAX: 3},
            {ID: 'DTM', MIN: 0, MAX: 10},
            {ID: 'CUR', MIN: 0, MAX: 1},
            {ID: 'DD', MIN: 0, MAX: 99999},
            {ID: 'LDT', MIN: 0, MAX: 1},
            {ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'LQ', MIN: 0, MAX: 100},
            ]},
            {ID: 'LX', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'REF', MIN: 0, MAX: 99999},
                {ID: 'DTM', MIN: 0, MAX: 1},
                {ID: 'N1', MIN: 0, MAX: 1},
                {ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
                    {ID: 'LQ', MIN: 1, MAX: 100},
                ]},
            ]},
            {ID: 'FA1', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'FA2', MIN: 1, MAX: 99999},
            ]},
        ]},
    ]},
    {ID: 'CTT', MIN: 0, MAX: 1, LEVEL: [
        {ID: 'AMT', MIN: 0, MAX: 12},
        {ID: 'ITA', MIN: 0, MAX: 10},
    ]},
    {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
4ca43d2b719997ab42b0ad4b0cd4e19ca724f756 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /storagegateway_write_f/working-storage_add.py | fdffa92a273c0f4515c3752bc83ee18b075b6423 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/storagegateway/add-working-storage.html
if __name__ == '__main__':
    """
    describe-working-storage : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/storagegateway/describe-working-storage.html
    """
    # Emit the parameter boilerplate for the
    # `aws storagegateway add-working-storage` CLI command.
    write_parameter("storagegateway", "add-working-storage") | [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
e6e142ba00e50a37c2699eb8e541e435194bb656 | f09e98bf5de6f6c49df2dbeea93bd09f4b3b902f | /google-cloud-sdk/lib/surface/compute/operations/__init__.py | 62bb5791805c19a38a10ac3b652bdc619bac8e65 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Peterfeng100/notepal | 75bfaa806e24d85189bd2d09d3cb091944dc97e6 | d5ba3fb4a06516fec4a4ae3bd64a9db55f36cfcd | refs/heads/master | 2021-07-08T22:57:17.407571 | 2019-01-22T19:06:01 | 2019-01-22T19:06:01 | 166,490,067 | 4 | 1 | null | 2020-07-25T04:37:35 | 2019-01-19T00:37:04 | Python | UTF-8 | Python | false | false | 1,057 | py | # -*- coding: utf-8 -*- #
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for reading and manipulating operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Operations(base.Group):
  """Read and manipulate Google Compute Engine operations."""
# Surface metadata consumed by calliope: the help category and the brief
# description shown by `gcloud compute operations --help`. Kept as post-class
# assignments to match the surrounding SDK convention.
Operations.category = 'Info'
Operations.detailed_help = {
    'brief': 'Read and manipulate Google Compute Engine operations',
}
| [
"kevinhk.zhang@mail.utoronto.ca"
] | kevinhk.zhang@mail.utoronto.ca |
30e09cae99d89491f961c166bc2f75e473bea427 | 0809673304fe85a163898983c2cb4a0238b2456e | /src/lesson_algorithms/functools_reduce_short_sequences.py | 55bf8b3a815b7250c2e53d854cee96174ee95735 | [
"Apache-2.0"
] | permissive | jasonwee/asus-rt-n14uhp-mrtg | 244092292c94ff3382f88f6a385dae2aa6e4b1e1 | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | refs/heads/master | 2022-12-13T18:49:02.908213 | 2018-10-05T02:16:41 | 2018-10-05T02:16:41 | 25,589,776 | 3 | 1 | Apache-2.0 | 2022-11-27T04:03:06 | 2014-10-22T15:42:28 | Python | UTF-8 | Python | false | false | 487 | py | import functools
def do_reduce(a, b):
    """Trace each reduction step, then combine the two operands with ``+``."""
    print('do_reduce({}, {})'.format(a, b))
    combined = a + b
    return combined
# One element, no initializer: reduce() returns it without calling do_reduce.
print('Single item in sequence:',
      functools.reduce(do_reduce, [1]))
# One element plus an initializer: exactly one do_reduce(99, 1) call.
print('Single item in sequence with initializer:',
      functools.reduce(do_reduce, [1], 99))
# Empty input with an initializer: the initializer comes back unchanged.
print('Empty sequence with initializer:',
      functools.reduce(do_reduce, [], 99))
# Empty input without an initializer raises TypeError.
try:
    print('Empty sequence:', functools.reduce(do_reduce, []))
except TypeError as err:
    print('ERROR: {}'.format(err))
| [
"peichieh@gmail.com"
] | peichieh@gmail.com |
eb748af2736e8a4cc158f58a4ef86fc66c6f8b14 | aa480d8b09dd7ad92c37c816ebcace24a35eb34c | /third-round/540.有序数组中的单一元素.py | 98dff56fd19fa1ef3b473daf1bbf5de8db64538d | [] | no_license | SR2k/leetcode | 7e701a0e99f9f05b21216f36d2f5ac07a079b97f | de131226159865dcb7b67e49a58d2ddc3f0a82c7 | refs/heads/master | 2023-03-18T03:37:02.916453 | 2022-09-16T01:28:13 | 2022-09-16T01:28:13 | 182,083,445 | 0 | 0 | null | 2023-03-08T05:44:26 | 2019-04-18T12:27:12 | Python | UTF-8 | Python | false | false | 1,739 | py | #
# @lc app=leetcode.cn id=540 lang=python3
#
# [540] 有序数组中的单一元素
#
# https://leetcode-cn.com/problems/single-element-in-a-sorted-array/description/
#
# algorithms
# Medium (61.02%)
# Likes: 480
# Dislikes: 0
# Total Accepted: 88.8K
# Total Submissions: 145.7K
# Testcase Example: '[1,1,2,3,3,4,4,8,8]'
#
# 给你一个仅由整数组成的有序数组,其中每个元素都会出现两次,唯有一个数只会出现一次。
#
# 请你找出并返回只出现一次的那个数。
#
# 你设计的解决方案必须满足 O(log n) 时间复杂度和 O(1) 空间复杂度。
#
#
#
# 示例 1:
#
#
# 输入: nums = [1,1,2,3,3,4,4,8,8]
# 输出: 2
#
#
# 示例 2:
#
#
# 输入: nums = [3,3,7,7,10,11,11]
# 输出: 10
#
#
#
#
#
#
# 提示:
#
#
# 1 <= nums.length <= 10^5
# 0 <= nums[i] <= 10^5
#
#
#
# @lc code=start
class Solution:
    """Find the unique element of a sorted array in which every other value
    appears exactly twice, in O(log n) time and O(1) space (LeetCode 540)."""

    def singleNonDuplicate(self, nums: list[int]) -> int:
        """Binary search on pairing: indices before the single value satisfy
        ``check``; indices at or after it do not."""
        lo = 0
        hi = len(nums) - 1
        while hi - lo > 1:
            mid = (lo + hi) // 2
            if self.check(nums, mid):
                lo = mid
            else:
                hi = mid
        return nums[lo] if not self.check(nums, lo) else nums[hi]

    def check(self, nums: list[int], i: int):
        """True when index *i* lies inside the correctly-paired prefix."""
        if i % 2 == 0:
            # Even index: its partner, if any, is the next element.
            return i + 1 < len(nums) and nums[i] == nums[i + 1]
        # Odd index: its partner is the previous element.
        return i - 1 >= 0 and nums[i] == nums[i - 1]
# @lc code=end
# Ad-hoc smoke checks (run on import): expected output is 2, 10, 1, 1, 1.
print(Solution().singleNonDuplicate([1,1,2,3,3,4,4,8,8]))
print(Solution().singleNonDuplicate([3,3,7,7,10,11,11]))
print(Solution().singleNonDuplicate([1,3,3]))
print(Solution().singleNonDuplicate([3,3,1]))
print(Solution().singleNonDuplicate([1]))
| [
"luozhou.csy@alibaba-inc.com"
] | luozhou.csy@alibaba-inc.com |
d66c544c7a655a84d2c0492fddee34becf974d0a | 1e987bd8b8be0dc1c139fa6bf92e8229eb51da27 | /util/freefocus/server/flask/flask.py | a41889348901545b057d2d513b7db5ebe3f9a5ae | [] | no_license | tszdanger/phd | c97091b4f1d7712a836f0c8e3c6f819d53bd0dd5 | aab7f16bd1f3546f81e349fc6e2325fb17beb851 | refs/heads/master | 2023-01-01T00:54:20.136122 | 2020-10-21T18:07:42 | 2020-10-21T18:09:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,738 | py | #!/usr/bin/env python
"""Run a REST server."""
import typing
from argparse import ArgumentParser
from contextlib import contextmanager
import flask
import flask_cors
import sqlalchemy
from flask import abort
from flask import request
from util.freefocus import freefocus
from util.freefocus import sql
# Flask application: CORS enabled for all routes, settings loaded from the
# local `config` module.
app = flask.Flask(__name__)
flask_cors.CORS(app)
app.config.from_object("config")
# SQLAlchemy session factory; populated by main() once the engine exists.
make_session = None
@contextmanager
def Session(commit: bool = False) -> sqlalchemy.orm.session.Session:
    """Yield a database session scoped to the ``with`` block.

    On clean exit the session is committed when *commit* is true; on any
    exception it is rolled back and the exception re-raised. The session is
    always closed.
    """
    db_session = make_session()
    try:
        yield db_session
        if commit:
            db_session.commit()
    except:
        db_session.rollback()
        raise
    finally:
        db_session.close()
# Versioned API prefix derived from the FreeFocus spec version, e.g. "/api/v1.2".
API_BASE = f"/api/v{freefocus.SPEC_MAJOR}.{freefocus.SPEC_MINOR}"
# Absolute URL prefix used when rendering links to resources.
URL_STUB = "http://" + app.config.get("SERVER_NAME", "") + API_BASE
def active_task_graph():
    """Return the task forest as nested dicts for template rendering."""
    def build_graph(session, task: sql.Task):
        # One node per task: the first line of its body, a completion flag,
        # and its children ordered newest-first.
        return {
            "id": task.id,
            "body": task.body.split("\n")[0],
            "completed": True if task.completed else False,
            "children": [
                build_graph(session, child)
                for child in session.query(sql.Task)
                .filter(sql.Task.parent_id == task.id)
                .order_by(sql.Task.created.desc())
            ],
        }
    with Session() as session:
        # List 'root' tasks
        q = (
            session.query(sql.Task)
            .filter(sql.Task.parent_id == None)
            .order_by(sql.Task.created.desc())
        )
        r = [build_graph(session, t) for t in q]
        return r
@app.route("/")
def index():
    """Render the landing page: spec version, static asset URLs, task tree."""
    data = {
        "freefocus": {
            "version": f"{freefocus.SPEC_MAJOR}.{freefocus.SPEC_MINOR}.{freefocus.SPEC_MICRO}",
        },
        "assets": {
            "cache_tag": 1,
            "bootstrap_css": flask.url_for("static", filename="bootstrap.css"),
            "styles_css": flask.url_for("static", filename="styles.css"),
            "site_js": flask.url_for("static", filename="site.js"),
        },
        "tasks": active_task_graph(),
    }
    return flask.render_template("lists.html", **data)
def response(data):
    """Serialize *data* as a JSON Flask response.

    Fix: the module imports ``flask`` but never imports ``jsonify`` as a bare
    name, so the original body raised ``NameError`` on every call; the call is
    now qualified through the module import.
    """
    return flask.jsonify(data)
def paginated_response(iterable: typing.Iterable):
    """Materialise *iterable* and wrap it in an API response (pagination is
    not implemented yet)."""
    # TODO: chunk and paginate
    items = list(iterable)
    return response(items)
def truncate(string: str, maxlen=144):
    """Clip *string* to *maxlen* characters, replacing the tail with "...".

    Returns a dict: ``data`` is the (possibly clipped) text and ``truncated``
    says whether clipping happened.
    """
    suffix = "..."
    if len(string) <= maxlen:
        return {"data": string, "truncated": False}
    clipped = string[: maxlen - len(suffix)] + suffix
    return {"data": clipped, "truncated": True}
def task_url(task: sql.Task):
    """Absolute API URL for *task*."""
    return f"{URL_STUB}/tasks/{task.id}"
def group_url(group: sql.Group):
    """Absolute API URL for *group*."""
    return f"{URL_STUB}/groups/{group.id}"
def asset_url(asset: sql.Asset):
    """Absolute API URL for *asset*.

    Fix: the original body interpolated the undefined name ``group`` (a
    copy/paste remnant of ``group_url``), so every call raised ``NameError``;
    it now uses the ``asset`` parameter.
    """
    return URL_STUB + f"/assets/{asset.id}"
def date(d):
    """ISO-8601 string for *d*, or ``None`` when *d* is falsy."""
    return d.isoformat() if d else None
@app.errorhandler(404)
def not_found(error):
    """ 404 error handler """
    # Fix: ``make_response`` and ``jsonify`` were used as bare names but are
    # never imported that way (only ``abort`` and ``request`` are); qualify
    # them through the ``flask`` module import to avoid NameError.
    return flask.make_response(flask.jsonify({"error": "Not found"}), 404)
@app.errorhandler(400)
def not_found(error):
    """ 400 Bad Request """
    # NOTE(review): this redefines ``not_found`` above; Flask registration
    # still works because the decorator runs before rebinding, but a distinct
    # name (e.g. ``bad_request``) would be clearer.
    # Fix: qualify ``make_response``/``jsonify`` through the ``flask`` module
    # import — the bare names were never imported and raised NameError.
    return flask.make_response(flask.jsonify({"error": "Bad Request"}), 400)
@app.route(API_BASE + "/persons", methods=["GET"])
def get_persons():
    """List every person as JSON."""
    with Session() as session:
        persons = session.query(sql.Person)
        return paginated_response(person.json() for person in persons)
@app.route(API_BASE + "/persons/<int:person_uid>", methods=["GET"])
def get_person(person_uid: int):
    """Return one person's JSON representation, or 404 when unknown."""
    with Session() as session:
        person = session.query(sql.Person).filter(sql.Person.uid == person_uid).first()
        if not person:
            abort(404)
        return response(person.json())
@app.route(API_BASE + "/persons/<int:person_uid>/groups", methods=["GET"])
def get_person_groups(person_uid: int):
    """List the groups the given person belongs to, or 404 when unknown."""
    with Session() as session:
        person = session.query(sql.Person).filter(sql.Person.uid == person_uid).first()
        if not person:
            abort(404)
        return paginated_response(group.json() for group in person.groups)
@app.route(API_BASE + "/tasks", methods=["GET"])
def get_tasks():
    """Return the task tree as JSON; ?completed=true|false filters nodes."""
    def build_graph(session, task: sql.Task = None):
        # With task=None, returns the list of root summaries; otherwise a
        # summary dict for *task* with its recursive children.
        parent = None if task is None else task.id
        q = (
            session.query(sql.Task)
            .filter(sql.Task.parent_id == parent)
            .order_by(sql.Task.created.desc())
        )
        # "Completed" request parameter
        completed = request.args.get("completed", None)
        if completed is not None:
            if completed == "true":
                q = q.filter(sql.Task.completed)
            elif completed == "false":
                q = q.filter(sql.Task.completed == None)
            else:
                # Any other value is a malformed request.
                abort(400)
        children = [build_graph(session, t) for t in q]
        if task is None:
            return children
        else:
            return {
                "url": task_url(task),
                "body": truncate(task.body),
                "status": task.status,
                "assigned": [g.id for g in task.assigned],
                "children": children,
            }
    with Session() as session:
        return paginated_response(build_graph(session))
@app.route(API_BASE + "/tasks/<int:task_id>", methods=["GET"])
def get_task(task_id: int):
    """Return the full JSON representation of one task, or 404."""
    with Session() as session:
        t = session.query(sql.Task).filter(sql.Task.id == task_id).first()
        if not t:
            abort(404)
        # Dates are rendered via date() (ISO-8601 or None); the creator is
        # referenced by its group URL.
        return response(
            {
                "body": t.body,
                "assigned": t.is_assigned,
                "blocked": t.is_blocked,
                "defer_until": date(t.defer_until),
                "start_on": date(t.start_on),
                "estimated_duration": t.duration,
                "due": date(t.due),
                "started": date(t.started),
                "completed": date(t.completed),
                "created": {"at": date(t.created), "by": group_url(t.created_by),},
            }
        )
@app.route(API_BASE + "/tasks/<int:task_id>/owners", methods=["GET"])
def get_task_owners(task_id: int):
    """List the URLs of the groups owning the given task, or 404."""
    with Session() as session:
        task = session.query(sql.Task).filter(sql.Task.id == task_id).first()
        if not task:
            abort(404)
        # TODO: summary
        return paginated_response(group_url(owner) for owner in task.owners)
@app.route(API_BASE + "/tasks/<int:task_id>/assigned", methods=["GET"])
def get_task_assigned(task_id: int):
    """List the URLs of the groups assigned to the given task, or 404."""
    with Session() as session:
        task = session.query(sql.Task).filter(sql.Task.id == task_id).first()
        if not task:
            abort(404)
        # TODO: summary
        return paginated_response(group_url(member) for member in task.assigned)
@app.route(API_BASE + "/tasks", methods=["POST"])
def add_task():
    """Create a task — unimplemented stub.

    NOTE(review): the body opens a committing session, does nothing, and
    returns None, which Flask will reject at request time; finish or remove.
    """
    with Session(commit=True) as session:
        pass
def main():
    """Parse CLI arguments, initialise the database, then serve the app.

    Fix: ``flask.Flask`` has no ``RunWithArgs`` method — the original final
    line raised ``AttributeError``; the development server is started with
    ``app.run`` instead.
    """
    global make_session
    parser = ArgumentParser(description=__doc__)
    parser.add_argument("uri")  # SQLAlchemy database URI
    parser.add_argument("-v", "--verbose", action="store_true")
    args = parser.parse_args()
    # Create missing tables and expose a session factory to the handlers.
    engine = sqlalchemy.create_engine(args.uri, echo=args.verbose)
    sql.Base.metadata.create_all(engine)
    sql.Base.metadata.bind = engine
    make_session = sqlalchemy.orm.sessionmaker(bind=engine)
    app.run(debug=True, host="0.0.0.0")
# Entry point: `python flask.py <db-uri>` starts the development server.
if __name__ == "__main__":
    main()
| [
"chrisc.101@gmail.com"
] | chrisc.101@gmail.com |
77331412b268e9fbf82fdb836edc42208c80e3a2 | bc2742cac4347eb8652295a0d4aeb8633eea7c1b | /tests/layers/test_gated_average_layer.py | 9b7827828d7f8e22c374d627ce99185f293b49e2 | [
"MIT"
] | permissive | temp3rr0r/neupy | 933648658cc2a5e85e0fc3955de0a3de65ea97c1 | f36071f5f46bf79ffd18485acca941db578656e8 | refs/heads/master | 2023-05-25T09:55:10.654122 | 2018-12-17T16:56:30 | 2018-12-17T16:56:30 | 163,117,180 | 0 | 0 | MIT | 2023-05-22T21:44:38 | 2018-12-25T23:24:17 | Python | UTF-8 | Python | false | false | 5,202 | py | import numpy as np
from neupy import layers
from neupy.utils import asfloat
from neupy.exceptions import LayerConnectionError
from base import BaseTestCase
class GatedAverageTestCase(BaseTestCase):
    """Tests for ``layers.GatedAverage``: shape inference, connection-time
    validation errors, and forward propagation.

    NOTE(review): ``assertRaisesRegexp`` is the deprecated alias of
    ``assertRaisesRegex``.
    """
    def test_gated_average_layer_negative_index(self):
        """Negative gating indices count from the end of the input networks."""
        gated_avg_layer = layers.GatedAverage(gating_layer_index=-1)
        layers.join([
            layers.Input(20) > layers.Relu(8),
            layers.Input(20) > layers.Relu(8),
            layers.Input(10) > layers.Softmax(2),
        ], gated_avg_layer)
        self.assertEqual(gated_avg_layer.output_shape, (8,))
        self.assertEqual(gated_avg_layer.input_shape, [(8,), (8,), (2,)])
        gated_avg_layer = layers.GatedAverage(gating_layer_index=-3)
        layers.join([
            layers.Input(10) > layers.Softmax(2),
            layers.Input(20) > layers.Relu(8),
            layers.Input(20) > layers.Relu(8),
        ], gated_avg_layer)
        self.assertEqual(gated_avg_layer.output_shape, (8,))
        self.assertEqual(gated_avg_layer.input_shape, [(2,), (8,), (8,)])
    def test_gated_average_layer_exceptions_index_position(self):
        """Out-of-range gating indices are rejected at connection time."""
        gated_avg_layer = layers.GatedAverage(gating_layer_index=3)
        with self.assertRaisesRegexp(LayerConnectionError, "Invalid index"):
            layers.join([
                layers.Input(20) > layers.Relu(8),
                layers.Input(10) > layers.Softmax(2),
                layers.Input(20) > layers.Relu(8),
            ], gated_avg_layer)
        gated_avg_layer = layers.GatedAverage(gating_layer_index=-4)
        with self.assertRaisesRegexp(LayerConnectionError, "Invalid index"):
            layers.join([
                layers.Input(10) > layers.Softmax(2),
                layers.Input(20) > layers.Relu(8),
                layers.Input(20) > layers.Relu(8),
            ], gated_avg_layer)
    def test_gated_average_layer_exceptions(self):
        """Non-vector gates, wrong gate width, and mismatched branch shapes
        all raise LayerConnectionError."""
        gated_avg_layer = layers.GatedAverage()
        with self.assertRaisesRegexp(LayerConnectionError, "should be vector"):
            layers.join([
                layers.Input((10, 3, 3)),  # shape not 1d
                layers.Input(20) > layers.Relu(8),
                layers.Input(20) > layers.Relu(8),
            ], gated_avg_layer)
        gated_avg_layer = layers.GatedAverage()
        error_message = "only 3 networks, got 2 networks"
        with self.assertRaisesRegexp(LayerConnectionError, error_message):
            layers.join([
                layers.Input(10) > layers.Softmax(3),
                layers.Input(20) > layers.Relu(8),
                layers.Input(20) > layers.Relu(8),
            ], gated_avg_layer)
        gated_avg_layer = layers.GatedAverage()
        error_message = "expect to have the same shapes"
        with self.assertRaisesRegexp(LayerConnectionError, error_message):
            layers.join([
                layers.Input(10) > layers.Softmax(2),
                layers.Input(20) > layers.Relu(8),
                layers.Input(20) > layers.Relu(10),
            ], gated_avg_layer)
    def test_gated_average_layer_non_default_index(self):
        """The gating network may appear at any position in the list."""
        gated_avg_layer = layers.GatedAverage(gating_layer_index=1)
        layers.join([
            layers.Input(20) > layers.Relu(8),
            layers.Input(10) > layers.Softmax(2),
            layers.Input(20) > layers.Relu(8),
        ], gated_avg_layer)
        self.assertEqual(gated_avg_layer.output_shape, (8,))
        self.assertEqual(gated_avg_layer.input_shape, [(8,), (2,), (8,)])
    def test_gated_average_layer_output_shape(self):
        """Shapes are unknown (None) until the layer is connected."""
        gated_avg_layer = layers.GatedAverage()
        self.assertIsNone(gated_avg_layer.output_shape)
        layers.join([
            layers.Input(10) > layers.Softmax(2),
            layers.Input(20) > layers.Relu(8),
            layers.Input(20) > layers.Relu(8),
        ], gated_avg_layer)
        self.assertEqual(gated_avg_layer.output_shape, (8,))
        self.assertEqual(gated_avg_layer.input_shape, [(2,), (8,), (8,)])
    def test_gated_average_layer_output(self):
        """Forward pass yields a (batch, 8) output for three 1-D branches."""
        input_layer = layers.Input(10)
        network = layers.join(
            [
                input_layer > layers.Softmax(2),
                input_layer > layers.Relu(8),
                input_layer > layers.Relu(8),
            ],
            layers.GatedAverage()
        )
        random_input = asfloat(np.random.random((20, 10)))
        actual_output = self.eval(network.output(random_input))
        self.assertEqual(actual_output.shape, (20, 8))
    def test_gated_average_layer_multi_dimensional_inputs(self):
        """Convolutional branches work; only the gate must be a vector."""
        input_layer = layers.Input((5, 5, 1))
        network = layers.join(
            [
                input_layer > layers.Reshape() > layers.Softmax(2),
                input_layer > layers.Convolution((2, 2, 3)),
                input_layer > layers.Convolution((2, 2, 3)),
            ],
            layers.GatedAverage()
        )
        self.assertEqual(network.input_shape, (5, 5, 1))
        self.assertEqual(network.output_shape, (4, 4, 3))
        random_input = asfloat(np.random.random((8, 5, 5, 1)))
        actual_output = self.eval(network.output(random_input))
        self.assertEqual(actual_output.shape, (8, 4, 4, 3))
| [
"mail@itdxer.com"
] | mail@itdxer.com |
6329de61ee60371c8076e5ae6630d63412935d5a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_106/ch60_2020_05_05_08_13_18_583778.py | 43482c80ca128e108aa30532e4ae4edf9f5a01b4 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | def eh_palindromo(string):
new=[]
for i in string:
new.append(i)
new.reverse()
news=''.join(new)
if news==string:
return True
else:
return False | [
"you@example.com"
] | you@example.com |
69540f2a46c06f8779897219544ba72f26af92e3 | bbf874cf4abb20e7ec5c66e808e97ae6f2043c3f | /0x01-python-if_else_loops_functions/1-last_digit.py | b40981bd458d6316d7bf311194b37fbc97f191f0 | [] | no_license | imperfectskillz/holbertonschool-higher_level_programming | 105fd80c2bea8fbb60eb786ce9019b3f63188342 | 704e99b29125d6449db32b9d52ede443318df620 | refs/heads/master | 2021-09-14T10:44:22.551896 | 2018-05-12T03:38:59 | 2018-05-12T03:38:59 | 113,130,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | #!/usr/bin/python3
import random

# One random integer per run; its "last digit" keeps the number's sign
# (e.g. the last digit of -626 is -6).
number = random.randint(-10000, 10000)
# Python's % follows the divisor's sign: number % 10 is in 0..9 for
# non-negatives, number % -10 is in -9..0 for negatives.
last = number % 10 if number >= 0 else number % -10
# Fixes vs. the original: the negative branch printed "less than 6 and not 0"
# even when the digit is 0 (e.g. -20), and one branch used inconsistent
# capitalization ("last digit").
if last == 0:
    print("Last digit of {} is {} and is 0".format(number, last))
elif last > 5:
    print("Last digit of {} is {} and is greater than 5".format(number, last))
else:
    print("Last digit of {} is {} and is less than 6 and not 0".format(number, last))
| [
"j.choi.89@gmail.com"
] | j.choi.89@gmail.com |
629f048bdb22a01508441f0c624ddca24a37e392 | bcca6c84d7fd2cbb782b38c68425b24a2dedeaee | /tests/chainsync/test_chainsync_adapter.py | 854d25620dc372d8124deb67d56a268041972a18 | [
"MIT"
] | permissive | dpays/chainsync | 8ba3f057e889f6b3b7d6405cbea1bf350493164a | 1277363787e37aa595571ab8a789831aadf0d3e6 | refs/heads/master | 2020-03-28T12:05:39.448076 | 2018-06-15T04:16:56 | 2018-06-15T04:16:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | from chainsync import ChainSync
from chainsync.adapters.steem import SteemAdapter
import unittest
class ChainSyncAdapterTestCase(unittest.TestCase):
    """Exercises ChainSync/SteemAdapter wiring: endpoint selection and how
    the debug flag propagates from client to adapter."""
    def setUp(self):
        # Fresh client per test; ChainSync accepts the adapter class itself.
        self.chainsync = ChainSync(adapter=SteemAdapter)
    def test_adapter_init_default_adapter(self):
        """A default-constructed client always carries an adapter."""
        self.assertNotEqual(self.chainsync.adapter, None)
    def test_adapter_init_custom_adapter_custom_endpoint_no_endpoints(self):
        """With no endpoints given, the adapter falls back to localhost:8090."""
        adapter = SteemAdapter()
        custom = ChainSync(adapter)
        self.assertEqual(custom.adapter.endpoint, 'http://localhost:8090')
    def test_adapter_init_custom_adapter_custom_endpoint_string(self):
        """A single endpoint string becomes the active endpoint."""
        adapter = SteemAdapter(endpoints='http://localhost:8091')
        custom = ChainSync(adapter)
        self.assertEqual(custom.adapter.endpoint, 'http://localhost:8091')
    def test_adapter_init_custom_adapter_custom_endpoint_list(self):
        """The first entry of an endpoint list is selected as active."""
        endpoints = ['http://localhost:8091', 'http://localhost:8090']
        adapter = SteemAdapter(endpoints=endpoints)
        custom = ChainSync(adapter)
        self.assertEqual(custom.adapter.endpoint, 'http://localhost:8091')
        self.assertEqual(custom.adapter.endpoints, endpoints)
    def test_adapter_debug_flag_default_false(self):
        """Debug logging is off unless explicitly requested."""
        self.assertEqual(self.chainsync.adapter.debug, False)
    def test_adapter_debug_flag_set_true(self):
        """The flag can be enabled directly on the adapter."""
        adapter = SteemAdapter(debug=True)
        custom = ChainSync(adapter)
        self.assertEqual(custom.adapter.debug, True)
    def test_adapter_debug_flag_set_true_from_main(self):
        """ChainSync(debug=True) pushes the flag down to its adapter."""
        adapter = SteemAdapter()
        custom = ChainSync(adapter, debug=True)
        self.assertEqual(custom.adapter.debug, True)
    def test_adapter_debug_flag_set_true_from_main_false_for_adapter(self):
        """The client-level flag wins over the adapter's own setting."""
        adapter = SteemAdapter(debug=False)
        # main debug flag should override adapter
        custom = ChainSync(adapter, debug=True)
        self.assertEqual(custom.adapter.debug, True)
| [
"aaron.cox@greymass.com"
] | aaron.cox@greymass.com |
2e47504f5f032988f0852462405fce0dfe5432fc | 0130c8b14927097663157846adc4b146d67d2fda | /tests/common/test_run/reduce_min_run.py | e46d731933bfc91f2158e842557a774c331fd039 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-3-Clause",
"NCSA",
"LLVM-exception",
"Zlib",
"BSD-2-Clause",
"MIT"
] | permissive | Shigangli/akg | e8be3e0ee1eafe3e42b4cc4d424c28f08ef4c0bc | 3766c54e0b109541932d147a6b5643a334b82403 | refs/heads/master | 2023-09-06T05:13:40.571583 | 2021-11-23T03:44:54 | 2021-11-23T03:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,453 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""reduce_min_run"""
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import reduce_min
from akg.utils.dsl_create import get_reduce_out_shape
from tests.common.gen_random import random_gaussian
from tests.common.base import get_rtol_atol
def reduce_min_run(shape, axis, keepdims, dtype, kernel_name="reduce_min", attrs=None):
    """run function for dsl function reduce_min.

    Builds the reduce_min kernel, launches it on random data, and compares
    the result against numpy's amin; returns (inputs, output, expect, ok).
    In tuning mode ('tuning' present in attrs) it only builds the module,
    also returning the prepared data when the tuning flag itself is truthy.
    """
    if attrs is None:
        attrs = {}
    # Operator attributes forwarded to the DSL builder.
    op_attrs = [axis, keepdims]
    if 'tuning' in attrs.keys():
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(reduce_min.reduce_min, [shape], [dtype],
                                  op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=t)
        if t:
            expect, inputs, output = gen_data(axis, dtype, keepdims, shape)
            return mod, expect, (inputs, output)
        return mod
    mod = utils.op_build_test(reduce_min.reduce_min, [shape], [dtype],
                              op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs)
    expect, inputs, output = gen_data(axis, dtype, keepdims, shape)
    # mod_launch overwrites the NaN-filled buffer with the kernel result.
    output = utils.mod_launch(mod, (inputs, output), expect=expect)
    rtol, atol = get_rtol_atol("reduce_min", dtype)
    return inputs, output, expect, compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=True)
def gen_data(axis, dtype, keepdims, shape):
    """Build the random input tensor, the numpy reference result, and a
    NaN-filled output buffer for the kernel launch."""
    input_data = random_gaussian(shape, miu=0, sigma=100.0)
    input_data = input_data.astype("float16").astype(dtype.lower())
    expect = np.amin(input_data, axis=axis, keepdims=keepdims)
    out_shape = get_reduce_out_shape(shape, axis=axis, keepdims=keepdims)
    output_buffer = np.full(out_shape, np.nan, dtype)
    return expect, input_data, output_buffer
| [
"1027252281@qq.com"
] | 1027252281@qq.com |
275470b552dbd57a9f1a0a80b53b397c66d110f4 | 88bd9a59edb18ecc51b3c0939cd96fb5baebf18d | /tests/test_linkedlist.py | 777e69c1866af79b65e9f2b806460f6737d2df3c | [] | no_license | LennyBoyatzis/compsci | 244d540485155d5846a93a84399d18097925468e | 9c1ca2288090ad7c1c7151202bb28990760b1997 | refs/heads/master | 2022-07-20T22:25:42.549428 | 2019-09-05T05:26:18 | 2019-09-05T05:26:18 | 91,846,937 | 0 | 0 | null | 2022-06-21T21:45:37 | 2017-05-19T21:31:42 | Python | UTF-8 | Python | false | false | 305 | py | from datastructures.linkedlist import SingleLinkedList
def test_single_linked_list_init():
    """A freshly constructed list starts with no head node."""
    fresh_list = SingleLinkedList()
    assert fresh_list.head is None
def test_push_front_method():
    """push_front installs a node at the head of an empty list."""
    fresh_list = SingleLinkedList()
    fresh_list.push_front(7)
    assert fresh_list.head is not None
| [
"lennyboyatzis@gmail.com"
] | lennyboyatzis@gmail.com |
8beaeecef14307357208d23d9892986cb71a2f27 | 93ceca6312bbbee196d57df3a6634bd66800bd26 | /hgsc_vcf/io.py | bc241057fc45947ee6620d435b769723830ab730 | [] | no_license | OpenGenomics/muse-tool | a9c058bddce466a6af64447a7fbdfc68a593f7c0 | cd59173c8e69c2a4f073ac2f1e07402835400b85 | refs/heads/master | 2021-01-24T05:25:28.170881 | 2019-09-23T07:50:54 | 2019-09-23T07:50:54 | 59,329,634 | 1 | 5 | null | 2018-02-10T17:53:16 | 2016-05-20T22:17:03 | Python | UTF-8 | Python | false | false | 4,494 | py |
import csv
from hgsc_vcf.metainfo import *
from collections import *
class Reader(object):
    """Pull-parser for VCF streams (Python 2): reads the header eagerly, then
    yields one record (an OrderedDict) per data line, keeping a one-record
    lookahead for peek()/take()."""
    def __init__(self, fobj):
        # fobj must be positioned at the start of the VCF; VCFHeader.load
        # consumes the header lines, leaving the stream at the first record.
        self.fobj = fobj
        self.header = VCFHeader()
        self.header.load(self.fobj)
        # NOTE(review): the lookahead starts empty, so the first take()
        # returns None until next() has run once — confirm callers prime it.
        self._next = None
    @staticmethod
    def parse_info_field(info):
        """Split a raw INFO column into an OrderedDict of key -> value list."""
        infos = info.split(';')
        result = OrderedDict()
        for i in infos:
            if '=' in i:
                k, v = i.split('=',1)
                result[k] = v.split(',')
            else:
                result[i] = True # True indicates that the flag is active
        return result
    def peek(self):
        """Return the buffered record without consuming it (may be None)."""
        return self._next
    def take(self):
        """Consume and return the buffered record, refilling the lookahead."""
        old = self._next
        try:
            self._next = self.next()
        except StopIteration:
            pass # swallow the error
        return old
    @staticmethod
    def parse_sample(format_keys, slist):
        """Zip FORMAT keys with one sample's comma-split values."""
        return OrderedDict(zip(format_keys, [i.split(',') for i in slist]))
    def __iter__(self):
        return self
    def next(self):
        """Parse the next data line into an OrderedDict record (Python 2
        iterator protocol); raises StopIteration at end of stream."""
        line = [c.strip() for c in self.fobj.readline().split('\t')]
        if len(line) < 1 or line[0] == '':
            self._next = None
            raise StopIteration
        try:
            record = OrderedDict()
            # The eight fixed VCF columns, with POS/QUAL converted to numbers
            # and the multi-valued columns split on their separators.
            for k, v in (
                ('CHROM', line[0]),
                ('POS', int(line[1])),
                ('ID', line[2].split(';')),
                ('REF', line[3]),
                ('ALT', line[4].split(',')),
                ('QUAL', float(line[5]) if line[5] != '.' else '.'),
                ('FILTER', line[6].split(';')),
                ('INFO', Reader.parse_info_field(line[7]))
            ):
                record[k] = v
            if len(line) > 8:
                record['FORMAT'] = line[8].split(':')
            if len(line) > 9:
                # One parsed sample dict per sample column, keyed by the
                # sample names taken from the header.
                record['SAMPLES'] = OrderedDict(zip(
                    self.header.samples,
                    [Reader.parse_sample(record['FORMAT'], s.split(':')) for s in line[9:]]
                ))
            self._next = record
            return record
        except:
            # Dump the offending line for debugging, then re-raise.
            print line
            raise
class Writer(object):
    """Serialise a VCF header and records (as produced by Reader) to a
    stream (Python 2)."""
    def __init__(self, fobj, header):
        assert isinstance(header, VCFHeader), "header must be a VCFHeader"
        self.header = header
        # Guards the header-before-records write order.
        self.header_written = False
        self.fobj = fobj
    def write_header(self):
        """Write the meta-info lines and the #CHROM column line; single-use."""
        if self.header_written:
            raise ValueError("Can't write the header twice")
        for h in self.header.headers:
            self.fobj.write(str(h) + '\n')
        header_cols = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']
        # FORMAT and the per-sample columns appear only when samples exist.
        if len(self.header.samples) > 0:
            header_cols.append('FORMAT')
            for s in self.header.samples:
                header_cols.append(s)
        self.fobj.write('#' + '\t'.join(header_cols) + '\n')
        self.header_written = True
    def write_record(self, record):
        """Write one record dict as a tab-separated VCF data line."""
        if not self.header_written:
            raise ValueError("Must write the header first")
        field_parts = []
        # Fixed columns: a joiner string means the stored value is a list to
        # be re-joined; None means it is rendered with str().
        for k, joiner in (('CHROM', None), ('POS', None), ('ID', ';'), ('REF', None), ('ALT', ','), ('QUAL', None), ('FILTER', ';')):
            if joiner:
                try:
                    field_parts.append(joiner.join(record[k]))
                except:
                    print k, joiner, record[k]
                    raise
            else:
                field_parts.append(str(record[k]))
        # info is a bit trickier
        info_parts = []
        for k, v in record['INFO'].items():
            if k == '.' and len(record['INFO']) > 1:
                continue # this is a leftover empty marker
            if isinstance(v, list):
                info_parts.append('%s=%s' % (k, ','.join(v)))
            else:
                # Non-list values are presence-only flags: emit the key alone.
                info_parts.append(k)
        field_parts.append(';'.join(info_parts))
        if len(self.header.samples) > 0:
            field_parts.append(':'.join(record['FORMAT']))
            for s in self.header.samples:
                sinfo = record['SAMPLES'][s]
                # sinfo is a dict (OrderedDict ideally)
                try:
                    field_parts.append(':'.join([','.join(sinfo[k]) for k in record['FORMAT']]))
                except:
                    print sinfo
                    raise
        self.fobj.write('\t'.join(field_parts) + '\n')
| [
"kellrott@gmail.com"
] | kellrott@gmail.com |
dbdbac578d96de37dc813bbbeee84b1773aabd04 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Anscombe/trend_Lag1Trend/cycle_30/ar_/test_artificial_128_Anscombe_Lag1Trend_30__100.py | 5582c252a3315ff23753b7b0b1494b978e138ecb | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 270 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
22857c0d3ef535f2ee9b813146680f4fb73bde70 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/11/usersdata/64/5178/submittedfiles/jogo.py | 5dd413f8c66a659270296c78990142bc4fae2baa | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
Cv = input('Digite o número de vitótias do Cormengo: ')
Ce = input('Digite o número de empates do Cormengo: ')
Cs = input('Digite o saldo de gols: ')
Fv = input('Digite o número de vitótias do Flaminthians: ')
Fe = input('Digite o número de empates do Flaminthians: ')
Fs = input('Digite o saldo de gols: ')
if Cv > Fv:
print "'C'"
elif Cv < Fv:
print "'F'"
elif Cv == Fv:
if Cs > Fs:
print "'C'"
elif Cs < Fs:
print "'F'"
elif Cs == Fs:
if Ce > Fe:
print "'C'"
elif Ce < Fe:
print "'V'"
else:
print "'='"
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8a552617657674743ae61f13d460d8618ba37d1e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_frenzy.py | 5638f60ebd7f097df0883fb40ad7ee40d162e391 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py |
#calss header
class _FRENZY():
def __init__(self,):
self.name = "FRENZY"
self.definitions = [u'(an example of) uncontrolled and excited behaviour or emotion that is sometimes violent: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
173535ac75a264211a5a21d5ad8aa98a968678e0 | e2a9c92dc7a4b2e73f2bcc1a2d507b591b75f814 | /Many-Tricks-for-ImageCalssification/model/__init__.py | abb0584a4cdb1e1724c348121213c2b82920356c | [] | no_license | ForrestPi/Tricks | f5f74f47e6e6ec24a5a544837eba35c5ce8e773e | d874318d4662c0192bcd147e1aad1e92cd3ea8b6 | refs/heads/master | 2022-12-08T18:06:24.390238 | 2020-10-20T04:01:04 | 2020-10-20T04:01:04 | 220,658,244 | 4 | 2 | null | 2022-12-08T03:44:45 | 2019-11-09T14:59:23 | Jupyter Notebook | UTF-8 | Python | false | false | 580 | py | # -*- encoding: utf-8 -*-
'''
@File : __init__.py.py
@Contact : whut.hexin@foxmail.com
@License : (C)Copyright 2017-2018, HeXin
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2020/2/21 10:18 xin 1.0 None
'''
from .baseline import BaseLine
def build_model(cfg):
if cfg.MODEL.NAME == "baseline":
model = BaseLine(cfg.MODEL.N_CHANNEL, cfg.MODEL.N_CLASS, cfg.MODEL.BACKBONE, cfg.MODEL.DROPOUT, cfg.MODEL.USE_NONLOCAL, cfg.MODEL.USE_SCSE, cfg.MODEL.USE_ATTENTION)
return model
| [
"forrest_zhu@foxmail.com"
] | forrest_zhu@foxmail.com |
68551acd8cab934463307fbf762962f323034d23 | b4e072d0759775836155ae97e06f6d1f0fce7500 | /dasss/__init__.py | 7722037351c57feb8d85a4b22e638d7da7153ed5 | [] | no_license | David-Hakobyan1/_different | 99abdfd62db8224dcc1a944537f43e3d83d34921 | 7c57f626cac7acfb2666d28f1a623ed60f8020d4 | refs/heads/master | 2023-06-18T00:27:18.745467 | 2021-07-19T09:01:52 | 2021-07-19T09:01:52 | 387,401,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | import os
from flask import Flask
from flask_sqalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
login_manager = LoginManager()
app = Flask(__name__)
app.config['SECRET_KEY']='fhuiefh236374623ewhfwwhfeu'
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+os.path(basedir,'data.sqlite')
db = SQLAlchemy(app)
Migrate(app,db)
login_manager.init_app(app)
login_manager.login_view = 'login'
| [
"my@mail.ru"
] | my@mail.ru |
ab583ceecd8854228670358e21d0f10d0cbf031d | 114b61513733083555924fc8ab347335e10471ae | /stackone/stackone/model/LDAPGroupManager.py | 0a863ce54ec5c114a96c1a39e5db5fec2260546d | [] | no_license | smarkm/ovm | 6e3bea19816affdf919cbd0aa81688e6c56e7565 | cd30ad5926f933e6723805d380e57c638ee46bac | refs/heads/master | 2021-01-21T04:04:28.637901 | 2015-08-31T03:05:03 | 2015-08-31T03:05:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,081 | py | from stackone.core.utils.utils import to_unicode, to_str, get_ldap_module
import tg
import logging
LOGGER = logging.getLogger('stackone.model')
from pprint import pprint
LDAP_Groups_Dict = {}
class LDAPBaseGroup():
def __init__(self):
self.ldap = get_ldap_module()
self.SCOPE_BASE = self.ldap.SCOPE_BASE
self.user_key = tg.config.get('user_key', 'uid')
self.group_key = tg.config.get('group_key', 'groupMembership')
self.email_key = tg.config.get('email_key', 'email')
def get_all_user(self, ldapcon, group_base_dn):
return self._get_all_user(ldapcon, group_base_dn)
def get_user_by_dn(self, ldapcon, user_dn, scope=None):
try:
if not scope:
scope = self.SCOPE_BASE
results = ldapcon.search_s(user_dn,scope)
return results
except Exception as e:
pass
def get_user_from_user_dn(self, dn):
dn_dict = dict([item.split('=') for item in dn.split(',')])
if dn_dict.has_key(self.user_key):
if not dn_dict.get(self.user_key):
LOGGER.error('%s is None in user DN: %s' % (self.user_key, dn))
return dn_dict.get(self.user_key)
LOGGER.error('Can not find %s in user DN: %s' % (self.user_key, dn))
class LDAPGroupOfNames(LDAPBaseGroup):
MEMBER_ATTR = 'member'
def get_user_groups(self, user_details, group_key):
group_details = self._get_user_groups(user_details, group_key)
group_names = self.parse_group(group_details)
return group_names
def parse_group(self, group_details):
group_names = self._parse_group(group_details)
return group_names
def _get_user_groups(self, user_details, group_key):
try:
group_details = user_details.get(group_key)
return group_details
except Exception as e:
print e
def _parse_group(self, group_details):
l = []
try:
for gp_name_str in group_details:
for item in gp_name_str.split(','):
splt = item.split('=')
if len(splt) == 1:
l.append(splt[0])
if splt[0] == self.user_key:
l.append(splt[1])
except Exception as ex:
raise ex
return l
def _get_all_user(self, ldapcon, group_base_dn, scope=None):
try:
if not scope:
scope = self.SCOPE_BASE
result = []
result_list = []
result_data_dict = {}
try:
result = ldapcon.search_s(group_base_dn, scope)
except Exception as e:
import traceback
if not len(result):
LOGGER.info('Could not find group: %s' % group_base_dn)
else:
g_dn,result_data_dict = result[0]
users = result_data_dict.get(self.MEMBER_ATTR)
LOGGER.info('Members of Group:%s from LDAP: ===== %s \n' % (group_base_dn, users))
for user in users:
user_info = self.get_user_by_dn(ldapcon, user)
LOGGER.info('Info of User:%s ==== %s' % (user, user_info))
if not user_info:
LOGGER.info('Could not find user: %s' % user)
u_dn,usr_info = user_info[0]
LOGGER.info('DN and Info of User:%s ==== DN:%s ==== Info:%s' % (user, u_dn, usr_info))
res_dict = {self.user_key: self.get_user_from_dn(u_dn),self.group_key:usr_info.get(self.group_key),self.email_key:usr_info.get(self.email_key)}
result_list.append(res_dict)
LOGGER.info('Members of Group:%s after Parsing: ===== %s \n' % (group_base_dn, result_list))
return result_list
except Exception as e:
import traceback
traceback.print_exc()
LOGGER.error(e)
raise e
LDAP_Groups_Dict['groupOfNames'] = LDAPGroupOfNames
| [
"18614072558@163.com"
] | 18614072558@163.com |
ed8306c08a8e419a2ded4aea0ef8cf8222dbdc06 | 30a2f77f5427a3fe89e8d7980a4b67fe7526de2c | /analyze/BHistograms_trigjetht500_eta1p7_CSVM_cfg.py | 997496691c837cd4dca4f3093e8fd3e8b1c3cfd4 | [] | no_license | DryRun/QCDAnalysis | 7fb145ce05e1a7862ee2185220112a00cb8feb72 | adf97713956d7a017189901e858e5c2b4b8339b6 | refs/heads/master | 2020-04-06T04:23:44.112686 | 2018-01-08T19:47:01 | 2018-01-08T19:47:01 | 55,909,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,632 | py | import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
import sys
options = VarParsing.VarParsing()
options.register('inputFiles',
'/uscms/home/dryu/eosdir/BJetPlusX/QCDBEventTree_BJetPlusX_Run2012B_v1_3/160429_121519/0000/QCDBEventTree_567.root',
VarParsing.VarParsing.multiplicity.list,
VarParsing.VarParsing.varType.string,
"List of input files"
)
options.register('outputFile',
'BHistograms_trigjetht_CSVL.root',
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"Output file"
)
options.register('dataSource',
'collision_data',
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
'collision_data or simulation'
)
options.register('dataType',
'data',
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
'data, signal, or background'
)
options.register('signalMass',
750.,
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.float,
'Signal mass hypothesis (only necessary for running over signal)'
)
options.parseArguments()
if options.dataSource != "collision_data" and options.dataSource != "simulation":
print "[BHistograms_BJetPlusX_loose] ERROR : dataSource must be collision_data or simulation"
sys.exit(1)
if not options.dataType in ["data", "signal", "background"]:
print "[BHistograms_BJetPlusX_loose] ERROR : dataType must be data, signal, or background"
sys.exit(1)
process = cms.Process("myprocess")
process.TFileService=cms.Service("TFileService",fileName=cms.string(options.outputFile))
##-------------------- Define the source ----------------------------
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
##-------------------- Cuts ------------------------------------------
# Cuts on the leading two jets
dijet_cuts = cms.VPSet(
cms.PSet(
name = cms.string("MinPt"),
parameters = cms.vdouble(30.),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("MaxAbsEta"),
parameters = cms.vdouble(1.7),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("IsTightID"),
parameters = cms.vdouble(),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("MaxMuonEnergyFraction"),
parameters = cms.vdouble(0.8),
descriptors = cms.vstring()
),
)
# Cuts on all PF jets (defines the generic jet collection for e.g. making fat jets)
pfjet_cuts = cms.VPSet(
cms.PSet(
name = cms.string("MinPt"),
parameters = cms.vdouble(30.),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("MaxAbsEta"),
parameters = cms.vdouble(5),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("IsLooseID"),
parameters = cms.vdouble(),
descriptors = cms.vstring()
),
)
# Cuts on calo jets
calojet_cuts = cms.VPSet(
cms.PSet(
name = cms.string("MinPt"),
parameters = cms.vdouble(30.),
descriptors = cms.vstring()
)
)
# Event cuts
event_cuts = cms.VPSet(
cms.PSet(
name = cms.string("TriggerOR"),
parameters = cms.vdouble(),
descriptors = cms.vstring('HLT_HT500_v1', 'HLT_HT500_v2', 'HLT_HT500_v3', 'HLT_HT500_v4', 'HLT_HT500_v5', 'HLT_HT500_v7')
),
cms.PSet(
name = cms.string("MaxMetOverSumEt"),
parameters = cms.vdouble(0.5),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("GoodPFDijet"),
parameters = cms.vdouble(),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("MinNCSVM"),
parameters = cms.vdouble(2),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("MinLeadingPFJetPt"),
parameters = cms.vdouble(80.),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("MinSubleadingPFJetPt"),
parameters = cms.vdouble(70.),
descriptors = cms.vstring()
),
cms.PSet(
name = cms.string("PFDijetMaxDeltaEta"),
parameters = cms.vdouble(1.3),
descriptors = cms.vstring()
)
)
##-------------------- User analyzer --------------------------------
process.BHistograms = cms.EDAnalyzer('BHistograms',
file_names = cms.vstring(options.inputFiles),
tree_name = cms.string('ak5/ProcessedTree'),
trigger_histogram_name = cms.string('ak5/TriggerNames'),
#triggers = cms.vstring('HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v2:L1_DoubleJetC36', 'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v3:L1_DoubleJetC36', 'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v4:L1_DoubleJetC36', 'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v5:L1_DoubleJetC36', 'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v7:L1_DoubleJetC36'),
#triggers = cms.vstring( 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v2:L1_SingleJet128', 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v3:L1_SingleJet128', 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v4:L1_SingleJet128', 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v5:L1_SingleJet128', 'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v7:L1_SingleJet128'),
data_source = cms.string(options.dataSource),
data_type = cms.string(options.dataType),
signal_mass = cms.double(options.signalMass),
max_events = cms.int32(-1),
dijet_cuts = dijet_cuts,
pfjet_cuts = pfjet_cuts,
calojet_cuts = calojet_cuts,
event_cuts = event_cuts,
fatjet_delta_eta_cut = cms.double(1.1),
btag_wp_1 = cms.string('CSVM'),
btag_wp_2 = cms.string('CSVM'),
)
process.p = cms.Path(process.BHistograms)
| [
"david.renhwa.yu@gmail.com"
] | david.renhwa.yu@gmail.com |
5ea32706b22f4c1e48cb23b8886f3e3df3d91e25 | 9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612 | /exercises/1901090012/1001S02E05_string.py | e71588946e9eb67f1c9fa471a58baa40eac41897 | [] | no_license | shen-huang/selfteaching-python-camp | e8410bfc06eca24ee2866c5d890fd063e9d4be89 | 459f90c9f09bd3a3df9e776fc64dfd64ac65f976 | refs/heads/master | 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null | UTF-8 | Python | false | false | 1,183 | py | str = '''
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
'''
str1 = str.replace("better","worse")
str2 = str1.replace(',','').replace('.','').replace('--','').replace('*','').replace('!','')
str3 = str2.split()
str4 = list()
for i in str3:
if 'ea' not in i:
str4.append(i)
str5 = list()
for j in str3:
k=j.swapcase()
str5.append(k)
print(sorted(str5))
| [
"46160162+EthanYan6@users.noreply.github.com"
] | 46160162+EthanYan6@users.noreply.github.com |
c42629d89614c8314f3d2663c0538bbccebedb25 | 38d93c5fd72fee380ec431b2ca60a069eef8579d | /Baekjoon,SWEA, etc/프로그래머스/경주로 건설.py | 2cdea97359bc34f435388bc297c0774193a060ef | [] | no_license | whgusdn321/Competitive-programming | 5d1b681f5bee90de5678219d91cd0fa764476ddd | 3ff8e6b1d2facd31a8210eddeef851ffd0dce02a | refs/heads/master | 2023-01-01T01:34:22.936373 | 2020-10-24T11:05:08 | 2020-10-24T11:05:08 | 299,181,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py |
def dfs(y, x, board, dp, n, mode):
dp[y][x] = n
h = len(board)
w = len(board[0])
if y == h and x == w:
return
right = [(y, x+1), (y, x-1)]
up = [(y+1, x), (y-1, x)]
if mode == 'down':
for ny, nx in right:
if 0<=ny<h and 0<=nx<w and dp[ny][nx] >= n + 500 and board[ny][nx] == 0:
dfs(ny, nx, board, dp, n+600, 'right')
for ny, nx in up:
if 0<=ny<h and 0<=nx<w and dp[ny][nx] >= n + 100 and board[ny][nx] == 0:
dfs(ny, nx, board, dp, n+100, 'down')
else:
for ny, nx in right:
if 0<=ny<h and 0<=nx<w and dp[ny][nx] >= n + 100 and board[ny][nx] == 0:
dfs(ny, nx, board, dp, n+100, 'right')
for ny, nx in up:
if 0<=ny<h and 0<=nx<w and dp[ny][nx] >= n + 500 and board[ny][nx] == 0:
dfs(ny, nx, board, dp, n+600, 'down')
def solution(board):
h = len(board)
w = len(board[0])
dp = [[9999999]*w for _ in range(h)]
dfs(0, 0, board, dp, 0, 'down')
dfs(0, 0, board, dp, 0, 'right')
for row in dp:
print(row)
return dp[h-1][w-1]
print(solution([[0,0,0,0,0,0,0,1],[0,0,0,0,0,0,0,0],[0,0,0,0,0,1,0,0],[0,0,0,0,1,0,0,0],[0,0,0,1,0,0,0,1],[0,0,1,0,0,0,1,0],[0,1,0,0,0,1,0,0],[1,0,0,0,0,0,0,0]])) | [
"blackgoldace@naver.com"
] | blackgoldace@naver.com |
324462ca0e87f7e808df4fee2e9d61aae130a9e7 | 7f52845b5aca331ac200565f897b2b1ba3aa79d9 | /m251/exp_groups/paper/ablations/reg_intermediate/launch/launch_fisher.py | 08a2048f9548880d8b25e0b4830dea7da80cae64 | [] | no_license | mmatena/m251 | f8fb4ba9c10cd4dfcf5ee252f80e4832e4e86aa0 | e23249cf0896c5b42bcd07de70f7b9996d8b276b | refs/heads/master | 2023-05-06T10:44:10.945534 | 2021-06-03T15:07:29 | 2021-06-03T15:07:29 | 321,217,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | """
export PYTHONPATH=$PYTHONPATH:~/Desktop/projects/m251:~/Desktop/projects/del8
python3 m251/exp_groups/paper/ablations/reg_intermediate/launch/launch_fisher.py
"""
from del8.executors.gce import gce
from del8.executors.vastai import vastai
from del8.executors.vastai import api_wrapper
from m251.exp_groups.paper.ablations.reg_intermediate import fisher
EXP = fisher.FisherComputation
launch_params = gce.GceParams()
vast_params = vastai.create_supervisor_params(
EXP,
num_workers=6,
offer_query=vastai.OfferQuery(
queries_str=" ".join(
[
"reliability > 0.95",
"num_gpus=1",
"dph < 0.5",
"inet_down > 75",
"inet_up > 75",
"dlperf >= 16",
"cuda_vers >= 11.0 has_avx = true",
]
),
order_str="dlperf_usd-",
),
disk_gb=12,
)
offers = api_wrapper.query_offers(vast_params)
print(f"Number of acceptable offers: {len(offers)}")
execution_items = EXP.create_all_execution_items()
print(f"Number of execution items to process: {len(execution_items)}")
node, deploy = gce.launch(execution_items, vast_params, launch_params)
| [
"michael.matena@gmail.com"
] | michael.matena@gmail.com |
7291c077f29384a0575867612672d234f52945ef | 0674b9d8a34036a6bbe2052e1cae0eee9a44554b | /SWEA/2819.py | 7d8af0a98a560adf5d023ddc155ace8677c6cce2 | [] | no_license | timebird7/Solve_Problem | 02fb54e90844a42dc69a78afb02cc10a87eda71c | 2d54b6ecbe3edf9895fd8303cbca99b3f50f68f3 | refs/heads/master | 2020-04-14T23:37:15.354476 | 2019-04-15T14:32:41 | 2019-04-15T14:32:41 | 164,208,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | dx = [0,1,0,-1]
dy = [1,0,-1,0]
def dfs(i,j,result):
global results, nums
if len(result) == 7:
results.add(result)
return
else:
for k in range(4):
x = i+dx[k]
y = j+dy[k]
if 0<=x<4 and 0<=y<4:
dfs(x,y,result+nums[x][y])
TC = int(input())
for tc in range(1,TC+1):
nums = [input().split() for i in range(4)]
results = set()
for i in range(4):
for j in range(4):
dfs(i,j,'')
print(f'#{tc} {len(results)}') | [
"timebird7@gmail.com"
] | timebird7@gmail.com |
18c6913fb81bb7788f18e1237a2b7dc2fcdbd837 | 1edfd072fae205d766e7c488f1af64f3af9fc23a | /src/python/shared/resend_new_sensor_messages.py | be8f468dc97b084dc81da4b2226a4c064fde4aec | [] | no_license | kth-social-robotics/multisensoryprocessing | 17fc96eb3776642de1075103eeb461125020c892 | 867abe6c921fbf930ac26e0f43a8be0404817bcd | refs/heads/master | 2021-01-21T11:50:16.348566 | 2018-11-05T14:48:42 | 2018-11-05T14:48:42 | 102,027,696 | 4 | 2 | null | 2018-02-20T15:14:22 | 2017-08-31T17:39:58 | C++ | UTF-8 | Python | false | false | 371 | py | import zmq
import time
from threading import Thread
def resend_new_sensor_messages():
def run():
time.sleep(2)
context = zmq.Context()
s = context.socket(zmq.REQ)
s.connect('tcp://localhost:45322')
s.send_string('new_sensor')
s.recv()
thread2 = Thread(target = run)
thread2.deamon = True
thread2.start()
| [
"pjjonell@kth.se"
] | pjjonell@kth.se |
2b8fd85409da823f4be40451742e7f66a75e29b9 | d0f2f7f220c825d827643ca81a08a23cfb871965 | /backend/code/rankor/events/actions.py | a6967842eddad33fbcb6a37f79240f295caf6c74 | [] | no_license | socek/rankor | 7e5e73f8f13bc3d12bd1b18ef01bef04f8f38f0a | eaf5002dd1e852895670517a8cdcb07bf7c69f66 | refs/heads/master | 2021-04-12T07:52:20.341699 | 2018-06-03T20:07:17 | 2018-06-03T20:07:17 | 125,769,351 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py | from rankor import app
from rankor.events.drivers import ScreenCommand
class Event(object):
def __init__(self, **kwargs):
self.data = kwargs
def send(self):
with app as context:
command = ScreenCommand(context.dbsession)
command.send_event(self.name, **self.data)
class ChangeViewEvent(Event):
name = 'change_view'
_views = ('welcome', 'highscore', 'question', 'questions')
def __init__(self, screen_id, view):
assert view in self._views
super().__init__(screen_id=screen_id, view=view)
class ShowQuestionEvent(Event):
name = 'show_question'
def __init__(self, screen_id, question_id, team_id, answer_id):
super().__init__(
screen_id=screen_id,
view='question',
question_id=question_id,
team_id=team_id,
answer_id=answer_id
)
class AttachTeamEvent(Event):
name = 'attach_team'
def __init__(self, screen_id, team_id):
super().__init__(
screen_id=screen_id,
view='question',
team_id=team_id,
)
class SelectAnswerEvent(Event):
name = 'select_answer'
def __init__(self, screen_id, answer_id):
super().__init__(
screen_id=screen_id,
view='question',
answer_id=answer_id,
)
class VeryfiAnswerEvent(Event):
name = 'veryfi_answer'
def __init__(self, screen_id, question_id, team_id, answer_id, game_answer_id):
super().__init__(
screen_id=screen_id,
view='question',
question_id=question_id,
answer_id=answer_id,
team_id=team_id,
game_answer_id=game_answer_id,
)
| [
"d.dlugajczyk@clearcode.cc"
] | d.dlugajczyk@clearcode.cc |
786381c19e3ac9f1748404798c53e1b36386ee54 | 65c8a6a7af2ee8cdf3866d012ea814887bd68a26 | /ppro360_automation/Ppro360/CoachingAndTriadCoaching_Pages/GoalSettingandNotesForm.py | 8b48f68d9c71cdb51a156bdbf127b429440669d4 | [] | no_license | 1282270620/automation_test | 9b3c595c3f7a139ded0a638ae4bcf31e0b7f9686 | 3faf86f0d641089eaf27eba906d22157dd2c1f5d | refs/heads/master | 2020-04-01T06:35:33.873989 | 2018-10-21T03:05:17 | 2018-10-21T03:05:17 | 152,954,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | '''
Created on 20170703
@author: lei.tan
'''
from selenium.webdriver.common.by import By
from Tablet_pages import BasePage
class GoalSettingandNotesForm(BasePage.Action):
def __init__(self):
self.callRecordingNumber_loc=(By.XPATH,"//*[@id='container']/div/section/div/form/div/div[3]/div[2]/div/div/input")
self.KPIcheckbox_path="//*[@id='container']/div/section/div/form/div[2]/div[1]/div/table/tbody/tr[4]/td[%d]/i"
self.commentsinput_path="//*[@id='container']/div/section/div/form/div[2]/div[3]/div/table/tbody/tr[%d]/td/textarea"
def click_KPIcheckbox (self, checkboxorderindex):
self.KPIcheckbox_loc=(By.XPATH,self.KPIcheckbox_path %checkboxorderindex)
self.find_element(*self.KPIcheckbox_loc).click()
def input_callRecordingNumber (self,text):
self.find_element(*self.callRecordingNumber_loc).send_keys(text);
def input_commentsinput (self,text,lineindex):
self.commentsinput_loc=(By.XPATH,self.commentsinput_path %lineindex)
self.find_element(*self.commentsinput_loc).send_keys(text); | [
"1282270620@qq.com"
] | 1282270620@qq.com |
c8dbaa1c2f5647253e3c3ab032c30dfed6d6b97a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_collaborative.py | d1a3b8e956f1707aae7d0b8e2c6db302aa7bf14b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py |
#calss header
class _COLLABORATIVE():
def __init__(self,):
self.name = "COLLABORATIVE"
self.definitions = [u'involving two or more people working together for a special purpose: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
5f373d64ed1ef5aa6fd75dcd19e8c78dffc2d86c | c33968b06f072ae36849e6e61a7e7ce3d6ae364b | /setup.py | 5fdc74fde120ab304fd3e5586df19fea92a2ae57 | [] | no_license | talkara/abita.locales | 051bb200bf81a66515cbdad61d3bb76ac44665f7 | b4135e30a9f6c703999905801f388ae8ee91e2a6 | refs/heads/master | 2021-05-27T05:01:08.192155 | 2013-05-13T10:19:36 | 2013-05-13T10:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | from setuptools import find_packages
from setuptools import setup
setup(
name='abita.locales',
version='0.2.1',
description="Overrides default translations of Plone for ABITA site.",
long_description=open("README.rst").read(),
classifiers=[
"Framework :: Plone",
"Framework :: Plone :: 4.2",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7"],
keywords='',
author='Taito Horiuchi',
author_email='taito.horiuchi@abita.fi',
url='https://github.com/taito/abita.locales',
license='None-free',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['abita'],
include_package_data=True,
zip_safe=False,
install_requires=[
'Products.CMFPlone',
'setuptools'],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""")
| [
"taito.horiuchi@gmail.com"
] | taito.horiuchi@gmail.com |
f3a6bbd121b4706726205070aa94817f0637d4bd | ae02e46f8bbe7a8db4a02f6e1b0523a6bb03e5d0 | /wtq/wtq/items.py | faaf346da58460e2516c1ed3fe86c297bedede8e | [
"Apache-2.0"
] | permissive | wangtianqi1993/ScrapyProject | d0cb0a2f86b4f5930067cfefed67efe0c1dc8422 | 4b7b7d1c683bc412d70d8d15be36bcc71967b491 | refs/heads/master | 2020-07-14T11:10:33.746789 | 2016-09-16T08:53:46 | 2016-09-16T08:53:46 | 67,754,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Field
class WtqItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
name = Field()
description = Field()
url = Field()
| [
"1906272972@qq.com"
] | 1906272972@qq.com |
59107bc428d0d9a5dc51a319b1fba875b846ca4e | cfdc0187ca770c09fa7a5868caedd1b5de0c668f | /pythoncollections/listprograms/binarysearch.py | 7bd76665a2bbf1120255c658e143462f24c07f7c | [] | no_license | nelvinpoulose999/Pythonfiles | 41ef15c9e3423ef5c8c121db7c6196772c40aa27 | 1e2129b1e1fb2a55100622fc83822726c7836224 | refs/heads/master | 2023-03-13T22:24:38.049100 | 2021-03-04T07:12:14 | 2021-03-04T07:12:14 | 333,663,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # Binary search
arr=[12,50,8,25,60,30,31]
element=int(input('enter the element'))
flag=0
arr.sort()
print(arr)
length=len(arr)
print(length)
lowl=0
uppl=len(arr)-1
print(uppl)
while (lowl<=uppl):
mid=(lowl+uppl)//2
#print(arr[mid])
if(element>arr[mid]):
lowl=mid+1
elif(element<arr[mid]):
uppl=mid-1
elif(element==arr[mid]):
flag=1
break
if(flag==1):
print('element found',arr.index(element))
else:
print('not found') | [
"nelvinpoulose999@gmail.com"
] | nelvinpoulose999@gmail.com |
35b37da4a41cd4e5cae2d72380c4ee7bcf1cb6c3 | bcb4c127578d2874ce445e021bf276ff07d6fa70 | /476.number-complement.py | 6faf920375a12022c3628e2e374cc57b3bd0135b | [] | no_license | SilverMaple/LeetCode | 3f9a4ef95cbaaed3238ad1dd41b6e6182c64575c | 581486a1e08dbceddcb64da4f6c4ed6c73ed5e84 | refs/heads/master | 2020-07-26T06:37:08.897447 | 2020-03-31T14:55:18 | 2020-03-31T14:55:18 | 208,566,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | #
# @lc app=leetcode id=476 lang=python
#
# [476] Number Complement
#
# @lc code=start
class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
if num == 0:
return 1
mask = 1<<30
while num&mask == 0:
mask >>= 1
mask = (mask<<1) - 1
return num ^ mask
# @lc code=end
| [
"1093387079@qq.com"
] | 1093387079@qq.com |
9d96fadf5961ab0e99b70dc45911f05a86b5c463 | f42c91f5fa040c739ab6bc1803f3253561f670fd | /mongodb/pymongo_page.py | 073513604cb6653301062657c400ed3be9b35c45 | [] | no_license | hackrole/daily-python | 11f29c698464172118ae30d7e5920692a63c3d8e | 2f1fb3b9646fa1c7131df6e336b6afd38128fbbd | refs/heads/master | 2022-10-16T08:06:14.067055 | 2022-09-19T13:35:17 | 2022-09-19T13:35:17 | 9,448,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | #!/usr/bin/env python
#coding=utf8
"""
使用pymongo实现的分页程序, 不使用skip,limit实现。
通过比较id来实现
"""
import unittest
from pymongo.connection import MongoClient
def init_connect(dbname="test", collection="page", host="localhost", port=27017):
""" init the mongodb connection """
conn = MongoClient(host, port)
coll = conn[dbname][collection]
return conn, coll
def page(cur_id, old_page, new_page):
    """Pagination without a global skip/limit, by comparing _id values.

    cur_id: _id of the first element on the current page
    old_page: current page number
    new_page: requested page number

    Returns a pymongo cursor over the requested page's documents.
    """
    # TODO: the first request without cur_id handle
    # NOTE(review): the returned cursor needs `conn` to stay alive, so the
    # connection is intentionally not closed here.
    conn, coll = init_connect()
    page_size = 20
    page_skip_limit = 8
    N = abs(old_page - new_page)
    if N >= page_skip_limit:
        raise Exception("not allow page skip more than %s" % page_skip_limit)
    # BUG FIX: pymongo's Cursor.sort() takes (key, direction) or a list of
    # such pairs -- passing a dict ({'_id': 1}) raises TypeError.
    if old_page > new_page:  # go N pages before
        data = coll.find({'_id': {'$lt': cur_id}}).skip(N * page_size).limit(page_size).sort('_id', 1)
    else:  # go forward
        data = coll.find({'_id': {'$gt': cur_id}}).skip((N - 1) * page_size).limit(page_size).sort('_id', 1)
    return data
class PageTest(unittest.TestCase):
    """Placeholder test case for the pagination helper (not yet implemented)."""

    def init_db_for_test(self):
        # TODO: seed the test database with sample documents
        pass

    def setUp(self):
        pass
def test_page():
    # TODO: exercise page() against a seeded test collection
    pass
if __name__ == "__main__":
    # BUG FIX: the guard compared against "main", so the test runner
    # never executed when this file was run as a script.
    unittest.main()
| [
"daipeng123456@gmail.com"
] | daipeng123456@gmail.com |
b82d8815d2881d84d7c9ed9d1df9ecbc36139998 | 25040bd4e02ff9e4fbafffee0c6df158a62f0d31 | /www/htdocs/wt/lapnw/data/item_32_3.tmpl.py | fc25344506c03475ee3a5f02879f2ed019e7c021 | [] | no_license | erochest/atlas | 107a14e715a058d7add1b45922b0f8d03bd2afef | ea66b80c449e5b1141e5eddc4a5995d27c2a94ee | refs/heads/master | 2021-05-16T00:45:47.585627 | 2017-10-09T10:12:03 | 2017-10-09T10:12:03 | 104,338,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py |
from lap.web.templates import GlobalTemplate, SubtemplateCode
class main(GlobalTemplate):
    """Template definition for page item 32.3 of the 'lapnw' project."""

    title = 'Page.Item: 32.3'  # page title rendered by the template engine
    project = 'lapnw'          # owning project identifier

    class page(SubtemplateCode):
        # No custom page logic; inherits behaviour from SubtemplateCode.
        pass
| [
"eric@eric-desktop"
] | eric@eric-desktop |
e83d7beec0fdb6a3185731cf8ce3b1b1dd178705 | ae7627dc5e6ef7e9f8db9d825a6bc097da5b34de | /python assignment ii/question_no_9.py | a029afae4c12501240f18af005dfc09016529a72 | [] | no_license | atulzh7/IW-Academy-Python-Assignment | cc5c8a377031097aff5ef62b209cb31f63241505 | 674d312b1438301865c840257686edf60fdb3a69 | refs/heads/master | 2022-11-14T13:26:16.747544 | 2020-07-12T16:06:08 | 2020-07-12T16:06:08 | 283,823,502 | 0 | 1 | null | 2020-07-30T16:24:30 | 2020-07-30T16:24:29 | null | UTF-8 | Python | false | false | 717 | py | """Binary search function
"""
def binary_search(sequence, item):
    """Return the index of `item` in the sorted `sequence`, or -1 if absent."""
    # initialization to avoid uninitialised variables
    high = len(sequence) - 1
    low = 0
    while low <= high:
        mid = (high + low) // 2
        # BUG FIX: the original indexed the global `arr` instead of the
        # `sequence` parameter, so searching any other list was broken.
        if sequence[mid] < item:
            low = mid + 1
        elif sequence[mid] > item:
            high = mid - 1
        else:
            return mid
    else:
        # loop exhausted without finding the item
        return -1
# Sample test array (must stay sorted for binary search to be valid)
arr = [ 2, 3, 4, 10, 40 ]
# Prompt the user for the value to search for
user_input = int(input("Enter an integer value to see if it exits: "))
result = binary_search(arr, user_input)
# -1 is the sentinel returned when the value is not found
if result != -1:
    print("Element is present at index", str(result))
else:
    print("Element is not present in array")
"="
] | = |
306a7721152e9eb6b28e662d6568fd7fc185c704 | 6bf336bc8d6ba061e0c707bdd8595368dee4d27b | /tutorials/10_days_of_statistics/poisson_distribution_i.py | 37fcfd7a6e1dc201e075a552306733d0ecbb4876 | [
"MIT"
] | permissive | avenet/hackerrank | aa536214dbccf5a822a30ea226e1dbaac9afb243 | e522030a023af4ff50d5fc64bd3eba30144e006c | refs/heads/master | 2021-01-01T20:15:06.647873 | 2017-11-24T23:59:19 | 2017-11-24T23:59:19 | 98,801,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | from functools import reduce
import math
from operator import mul
# Approximation of Euler's number -- presumably the value prescribed by the
# exercise so rounded answers match the expected output.
E = 2.71828


def poisson_distribution(t, k):
    """Return P(X = k) for a Poisson random variable with mean `t`.

    Uses the classic formula t**k * e**(-t) / k! with the module's
    approximation of e.
    """
    success_probability = t ** k
    distribution_part = 1.0 / (E ** t)
    # Use the standard library's factorial instead of the hand-rolled
    # reduce()-based helper defined elsewhere in this file.
    successes_combination = math.factorial(k)
    return (
        success_probability * distribution_part
    ) / successes_combination
def fact(number):
    """Return number! (the product of 1..number); 0! is 1."""
    if number == 0:
        return 1
    return reduce(mul, range(1, number + 1))
# Read the distribution mean and the value of the random variable from
# stdin, then print P(X = k) rounded to three decimals.
mean = float(input())
random_variable_value = int(input())
print(
    round(
        poisson_distribution(
            mean,
            random_variable_value
        ),
        3
    )
)
| [
"andy.venet@gmail.com"
] | andy.venet@gmail.com |
06df6ae711be16094a2119dad9e994a87ba43a11 | ec7fca4065a12bada3fdf9e92fc7c52ae9cddc83 | /setup.py | 8fa2b083f93c92d20535997bf324642bd9783890 | [
"MIT"
] | permissive | SLongofono/sneakysnek | c67d913801ad3c0bb320c8fca0ad645faf56eb2d | 3001e83755da6337f42e3136cb5e29d098800ddf | refs/heads/master | 2021-09-05T15:40:56.030291 | 2018-01-29T10:45:07 | 2018-01-29T10:45:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | #!/usr/bin/env python
from setuptools import setup
# Build the long description from README.md when pypandoc is available;
# fall back to an empty string so installation still works without it.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
    long_description = ""

# Packages shipped with the distribution.
packages = [
    "sneakysnek",
    "sneakysnek.recorders"
]

# No unconditional runtime dependencies.
requires = []

# Platform-specific backends: Quartz bindings on macOS, Xlib on Linux.
extras_require = {
    ":sys_platform == 'darwin'": ["pyobjc-framework-Quartz"],
    ":'linux' in sys_platform": ["python-xlib"]
}

setup(
    name='sneakysnek',
    version="0.1.0",
    description="Dead simple cross-platform keyboard & mouse global input capture solution for Python 3.6+",
    long_description=long_description,
    author="Nicholas Brochu",
    author_email='nicholas@serpent.ai',
    packages=packages,
    include_package_data=True,
    install_requires=requires,
    extras_require=extras_require,
    entry_points={
        # `sneakysnek` console command runs the demo recorder.
        'console_scripts': ['sneakysnek = sneakysnek.recorder:demo']
    },
    license='MIT',
    url='https://github.com/SerpentAI/sneakysnek',
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6'
    ]
)
| [
"info@nicholasbrochu.com"
] | info@nicholasbrochu.com |
2e6c44452b9a4d23c7de2e2283bec77b1394bc6e | 10e5b1b2e42a2ff6ec998ed900071e8b5da2e74e | /design/1381_design_a_stack_with_increment_operation/1381_design_a_stack_with_increment_operation.py | 616a72f862d666343aeae68684a63a2efdd8e0c2 | [] | no_license | zdyxry/LeetCode | 1f71092d687316de1901156b74fbc03588f0b0a5 | b149d1e8a83b0dfc724bd9dc129a1cad407dd91f | refs/heads/master | 2023-01-29T11:59:14.162531 | 2023-01-26T03:20:23 | 2023-01-26T03:20:23 | 178,754,208 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 549 | py |
class CostomStack(object):
    """Bounded stack with an O(1) bulk-increment operation (LC 1381).

    `inc[i]` holds a pending increment owed to element i and every element
    below it; increments are paid lazily when elements are popped.
    (Class name kept as in the original submission.)
    """

    def __init__(self, maxSize):
        self.n = maxSize    # capacity
        self.stack = []     # raw pushed values
        self.inc = []       # lazy per-level increments

    def push(self, x):
        """Push x; silently ignore the call once the stack is full."""
        if len(self.inc) >= self.n:
            return
        self.stack.append(x)
        self.inc.append(0)

    def pop(self):
        """Pop and return the top value plus its increments, or -1 if empty."""
        if not self.inc:
            return -1
        pending = self.inc.pop()
        if self.inc:
            # Hand the remaining increment down to the new top element.
            self.inc[-1] += pending
        return self.stack.pop() + pending

    def increment(self, k, val):
        """Add val to the bottom min(k, size) elements."""
        if self.inc:
            top = min(k, len(self.inc)) - 1
            self.inc[top] += val
"zdyxry@gmail.com"
] | zdyxry@gmail.com |
248014df533f845174e5ea4d9eb271296a649d61 | 9feac0e0cc68891707772d5b557a44e87377dec5 | /fuzzers/071-ps8-bufg/generate_permutations.py | 58442f658c90c606f0ec59287e1638a22d31b1ea | [
"LicenseRef-scancode-dco-1.1",
"ISC"
] | permissive | daveshah1/prjuray | 8213030dd83f69b0caba488d4154d006a08c648b | 02b31b5d7c19f66f50b3a28218921433df8a9af8 | refs/heads/master | 2022-11-26T14:50:46.500424 | 2020-07-17T23:23:39 | 2020-07-17T23:23:39 | 280,824,403 | 2 | 0 | ISC | 2020-07-19T08:32:10 | 2020-07-19T08:32:10 | null | UTF-8 | Python | false | false | 2,600 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project U-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import csv
from utils.clock_utils import MAX_GLOBAL_CLOCKS
import random
def output_iteration(tile, bufg_inputs, bufg_outputs):
    """ Output 1 iteration of BUFG_PS inputs -> BUFG_PS outputs.
    This function generates a set of permutations of 1-18 BUFG input pins to
    one of the 24 BUFG_PS output pins. Each iteration ensures that each input
    reachs each of the outputs.
    Only 1 iteration of this function is required to ensure that all available
    inputs reach all available outputs, but this function does not ensure
    uncorrelated solutions. More iterations increase the change of an
    uncorrelated solution.
    """
    # Each input index gets its own randomly shuffled ordering of sinks;
    # sinks are removed from the list once that input has been emitted
    # driving them.  (Do not reorder the random.* calls: output depends on
    # the exact call sequence under the fixed seed set in main().)
    inputs_to_sinks = {}
    for idx, _ in enumerate(bufg_inputs):
        inputs_to_sinks[idx] = sorted(bufg_outputs)
        random.shuffle(inputs_to_sinks[idx])
    while True:
        # One CSV row per pass: assign each input to its first still-free
        # output, in a fresh random input order.
        outputs = {}
        inputs = sorted(inputs_to_sinks.keys())
        random.shuffle(inputs)
        for idx in inputs:
            for output in inputs_to_sinks[idx]:
                if output not in outputs:
                    outputs[output] = idx
                    break
            # NOTE(review): `output` here is the last value from the inner
            # loop; if inputs_to_sinks[idx] were ever empty this would reuse
            # the previous iteration's value -- confirm lists never empty
            # before their entries are exhausted.
            if output in outputs and outputs[output] == idx:
                inputs_to_sinks[idx].remove(output)
        if len(outputs) == 0:
            # Every input has visited every output: iteration complete.
            break
        # Emit the row: one column per BUFG output, blank when undriven.
        output_str = ["" for _ in bufg_outputs]
        for output in outputs:
            output_str[output] = str(outputs[output])
        print('{},{}'.format(tile, ','.join(output_str)))
def main():
    """Read the PS8 BUFG pin map and print permutation rows as CSV."""
    # Fixed seed: output must be reproducible between runs.
    random.seed(0)
    # Map clock tile -> list of PS8 pin names feeding that tile.
    bufg_inputs_to_tiles = {}
    with open('ps8_bufg_pin_map.csv') as f:
        for row in csv.DictReader(f):
            clock_tiles = row['clock_tiles'].split(' ')
            # Each pin is expected to reach exactly one clock tile.
            assert len(clock_tiles) == 1, (row['pin'], clock_tiles)
            tile = clock_tiles[0]
            if tile not in bufg_inputs_to_tiles:
                bufg_inputs_to_tiles[tile] = []
            # Keep only the site portion of the pin name (before '/').
            bufg_inputs_to_tiles[tile].append(row['pin'].split('/')[0])
    bufg_outputs = list(range(MAX_GLOBAL_CLOCKS))
    # CSV header: one column per global clock output.
    print('tile,{}'.format(','.join(
        'bufg{}_input'.format(output) for output in bufg_outputs)))
    # Multiple iterations reduce correlation between solutions (see
    # output_iteration's docstring).
    NUM_ITERATIONS = 3
    for _ in range(NUM_ITERATIONS):
        for tile in bufg_inputs_to_tiles:
            output_iteration(tile, bufg_inputs_to_tiles[tile], bufg_outputs)


if __name__ == "__main__":
    main()
| [
"537074+litghost@users.noreply.github.com"
] | 537074+litghost@users.noreply.github.com |
bc2368589f35d8fc2648dca4462a81428cd140ee | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/ProdResource.py | d6f8dde13be2bb3de55e548a2e06cf35597c36d8 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 2,018 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ProdResource(object):
    """Value object with key/name/type/value fields plus (de)serialisation
    to and from the alipay dict representation."""

    # Attribute names that take part in (de)serialisation.
    _FIELDS = ('key', 'name', 'type', 'value')

    def __init__(self):
        self._key = None
        self._name = None
        self._type = None
        self._value = None

    @property
    def key(self):
        return self._key

    @key.setter
    def key(self, value):
        self._key = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    def to_alipay_dict(self):
        """Serialise all truthy fields, recursing into values that expose
        a to_alipay_dict() of their own."""
        params = dict()
        for field in self._FIELDS:
            item = getattr(self, field)
            if not item:
                continue
            if hasattr(item, 'to_alipay_dict'):
                params[field] = item.to_alipay_dict()
            else:
                params[field] = item
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a ProdResource from a dict; returns None for empty input."""
        if not d:
            return None
        o = ProdResource()
        for field in ProdResource._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
34880cf8e8a7fb475fbacb1cb45b653e69cd3101 | c44d1735c21164bb9facc7eac1412a41295130a9 | /raven/contrib/flask.py | 93e2f5420d4af502ba410fe44bd2dfd46ad0b6ba | [
"BSD-3-Clause"
] | permissive | joshma/raven-python | 082b237b73e5f8f825fe013b6fdc3144055bfeec | 9f02875b6bf120c28c455547d6f7b2618ffbf070 | refs/heads/master | 2021-01-22T13:17:16.202650 | 2013-12-13T01:48:55 | 2013-12-13T01:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,569 | py | """
raven.contrib.flask
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
try:
from flask_login import current_user
except ImportError:
has_flask_login = False
else:
has_flask_login = True
import sys
import os
import logging
from flask import request
from flask.signals import got_request_exception
from raven.conf import setup_logging
from raven.base import Client
from raven.middleware import Sentry as SentryMiddleware
from raven.handlers.logging import SentryHandler
from raven.utils.compat import _urlparse
from raven.utils.wsgi import get_headers, get_environ
from werkzeug.exceptions import ClientDisconnected
def make_client(client_cls, app, dsn=None):
    """Instantiate a Raven client from the Flask app's SENTRY_* config.

    The explicit `dsn` argument wins over app config, which wins over the
    SENTRY_DSN environment variable.
    """
    return client_cls(
        dsn=dsn or app.config.get('SENTRY_DSN') or os.environ.get('SENTRY_DSN'),
        # The app's own import path is always part of include_paths.
        include_paths=set(app.config.get('SENTRY_INCLUDE_PATHS', [])) | set([app.import_name]),
        exclude_paths=app.config.get('SENTRY_EXCLUDE_PATHS'),
        servers=app.config.get('SENTRY_SERVERS'),
        name=app.config.get('SENTRY_NAME'),
        public_key=app.config.get('SENTRY_PUBLIC_KEY'),
        secret_key=app.config.get('SENTRY_SECRET_KEY'),
        project=app.config.get('SENTRY_PROJECT'),
        site=app.config.get('SENTRY_SITE_NAME'),
        processors=app.config.get('SENTRY_PROCESSORS'),
        string_max_length=app.config.get('SENTRY_MAX_LENGTH_STRING'),
        list_max_length=app.config.get('SENTRY_MAX_LENGTH_LIST'),
        extra={
            'app': app,
        },
    )
class Sentry(object):
    """
    Flask application for Sentry.
    Look up configuration from ``os.environ['SENTRY_DSN']``::
    >>> sentry = Sentry(app)
    Pass an arbitrary DSN::
    >>> sentry = Sentry(app, dsn='http://public:secret@example.com/1')
    Pass an explicit client::
    >>> sentry = Sentry(app, client=client)
    Automatically configure logging::
    >>> sentry = Sentry(app, logging=True, level=logging.ERROR)
    Capture an exception::
    >>> try:
    >>>     1 / 0
    >>> except ZeroDivisionError:
    >>>     sentry.captureException()
    Capture a message::
    >>> sentry.captureMessage('hello, world!')
    By default, the Flask integration will do the following:
    - Hook into the `got_request_exception` signal. This can be disabled by
      passing `register_signal=False`.
    - Wrap the WSGI application. This can be disabled by passing
      `wrap_wsgi=False`.
    - Capture information from Flask-Login (if available).
    """

    # NOTE: the `logging` parameter shadows the logging module inside
    # __init__; the `level=logging.NOTSET` default is evaluated at def
    # time, before the shadowing takes effect.
    def __init__(self, app=None, client=None, client_cls=Client, dsn=None,
                 logging=False, level=logging.NOTSET, wrap_wsgi=True,
                 register_signal=True):
        self.dsn = dsn
        self.logging = logging
        self.client_cls = client_cls
        self.client = client
        self.level = level
        self.wrap_wsgi = wrap_wsgi
        self.register_signal = register_signal
        # Support both the direct and the deferred (init_app) Flask
        # extension initialisation patterns.
        if app:
            self.init_app(app)

    def handle_exception(self, *args, **kwargs):
        """Signal receiver for got_request_exception: report to Sentry
        unless the exception type is explicitly ignored via config."""
        if not self.client:
            return
        ignored_exc_type_list = self.app.config.get('RAVEN_IGNORE_EXCEPTIONS', [])
        exc = sys.exc_info()[1]
        if any((isinstance(exc, ignored_exc_type) for ignored_exc_type in ignored_exc_type_list)):
            return
        # NOTE(review): Flask's got_request_exception sends the exception
        # as a keyword, so 'exc_info' is typically absent here and
        # captureException falls back to sys.exc_info() -- confirm.
        self.captureException(exc_info=kwargs.get('exc_info'))

    def get_user_info(self, request):
        """
        Requires Flask-Login (https://pypi.python.org/pypi/Flask-Login/) to be installed
        and setup
        """
        if not has_flask_login:
            return
        if not hasattr(self.app, 'login_manager'):
            return
        try:
            is_authenticated = current_user.is_authenticated()
        except AttributeError:
            # HACK: catch the attribute error thrown by flask-login is not attached
            # > current_user = LocalProxy(lambda: _request_ctx_stack.top.user)
            # E AttributeError: 'RequestContext' object has no attribute 'user'
            return {}
        if is_authenticated:
            user_info = {
                'is_authenticated': True,
                'is_anonymous': current_user.is_anonymous(),
                'id': current_user.get_id(),
            }
            # Optionally forward extra user attributes listed in config.
            if 'SENTRY_USER_ATTRS' in self.app.config:
                for attr in self.app.config['SENTRY_USER_ATTRS']:
                    if hasattr(current_user, attr):
                        user_info[attr] = getattr(current_user, attr)
        else:
            user_info = {
                'is_authenticated': False,
                'is_anonymous': current_user.is_anonymous(),
            }
        return user_info

    def get_http_info(self, request):
        """Build the Sentry HTTP context dict from a Flask request."""
        urlparts = _urlparse.urlsplit(request.url)
        try:
            formdata = request.form
        except ClientDisconnected:
            # The client went away mid-upload; report without form data.
            formdata = {}
        return {
            'url': '%s://%s%s' % (urlparts.scheme, urlparts.netloc, urlparts.path),
            'query_string': urlparts.query,
            'method': request.method,
            'data': formdata,
            'headers': dict(get_headers(request.environ)),
            'env': dict(get_environ(request.environ)),
        }

    def before_request(self, *args, **kwargs):
        # Attach per-request HTTP and user context so any event captured
        # during this request carries it.
        self.client.http_context(self.get_http_info(request))
        self.client.user_context(self.get_user_info(request))

    def init_app(self, app, dsn=None):
        """Wire this extension into `app` (deferred-init pattern)."""
        self.app = app
        if dsn is not None:
            self.dsn = dsn
        if not self.client:
            self.client = make_client(self.client_cls, app, self.dsn)
        if self.logging:
            setup_logging(SentryHandler(self.client, level=self.level))
        if self.wrap_wsgi:
            app.wsgi_app = SentryMiddleware(app.wsgi_app, self.client)
        app.before_request(self.before_request)
        if self.register_signal:
            got_request_exception.connect(self.handle_exception, sender=app)
        # Register under the conventional Flask extensions registry.
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        app.extensions['sentry'] = self

    def captureException(self, *args, **kwargs):
        assert self.client, 'captureException called before application configured'
        return self.client.captureException(*args, **kwargs)

    def captureMessage(self, *args, **kwargs):
        assert self.client, 'captureMessage called before application configured'
        return self.client.captureMessage(*args, **kwargs)
| [
"dcramer@gmail.com"
] | dcramer@gmail.com |
e1c60de6b9628d02d92def760be54bcc5aad7781 | 1edee17385db53395352e91cf9f4b566a0f07b45 | /17_requests.py | 1837ed08298f814eee55c996a40c163042906474 | [] | no_license | Zerl1990/2020_python_workshop | 705fbdd280aea8d75df0c7d14f1ef617c509cd08 | 78a13962f8c1ab6bc0ef85558a0b54845ac8c3a1 | refs/heads/master | 2022-11-26T20:36:47.212113 | 2020-08-08T19:02:13 | 2020-08-08T19:02:13 | 281,791,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | # Import requests library
import requests
# Demo: query the OpenAQ REST API (third-party `requests` library) and show
# the various ways of inspecting the response.  Requires network access.
# Define url
url = 'https://api.openaq.org/v1/cities?country=MX'
# Execute a GET request against the REST service
response = requests.get(url)
# Print response object
print(f'Response from {url} is {response}')
# Print response status code
print(f'Response status code: {response.status_code}')
# Print raw response content (String)
print(f'Response raw content: {response.raw}')
# Print the response in JSON format (Dictionary); note .json() re-parses
# the body on each call below.
print(f'Response JSON: {response.json()}')
# Print variable type of json response
print(f'Response JSON type: {type(response.json())}')
| [
"luis.m.rivas@oracle.com"
] | luis.m.rivas@oracle.com |
c4109be7d713a6bb1f9c59d02bfcb0a60da154a3 | 81c8baf31e15cf132b22cc489e7c8fc7b86003a4 | /fulltext/registry.py | 39ab6bd2ff12756510edfc1c3e52bf2c15d8018d | [
"MIT"
] | permissive | LinuxOSsk/Shakal-NG | 0b0030af95a8dad4b120ae076920aa3a4020c125 | 93631496637cd3847c1f4afd91a9881cafb0ad83 | refs/heads/master | 2023-09-04T04:27:05.481496 | 2023-08-30T04:10:41 | 2023-08-30T04:10:41 | 2,168,932 | 11 | 8 | MIT | 2023-08-16T03:34:02 | 2011-08-07T14:36:25 | Python | UTF-8 | Python | false | false | 724 | py | # -*- coding: utf-8 -*-
from collections import defaultdict
from django.utils.functional import cached_property
class FulltextRegister(object):
    """Registry of fulltext search definitions, indexed by model class."""

    def __init__(self):
        self.__registered = []               # all definitions, in order
        self.__by_model = defaultdict(list)  # model class -> definitions

    def __iter__(self):
        return iter(self.__registered)

    def register(self, fulltext):
        """Attach this register to `fulltext` and record the definition."""
        fulltext.register = self
        self.__registered.append(fulltext)
        self.__by_model[fulltext.model].append(fulltext)

    def get_for_model(self, cls):
        """Definitions registered for `cls` (empty list when none).

        Uses .get() so lookups never grow the defaultdict.
        """
        return self.__by_model.get(cls, [])

    @cached_property
    def index_class(self):
        # Imported lazily to avoid a circular import with .models.
        from .models import SearchIndex
        return SearchIndex

    @cached_property
    def updated_field(self):
        return self.index_class.get_updated_field()
# Module-level singleton used by the rest of the fulltext package.
register = FulltextRegister()
| [
"miroslav.bendik@gmail.com"
] | miroslav.bendik@gmail.com |
6b0de98bbc67259392d7f7b7ce46d6038aa470df | 963e0c0a12699890fb8303e8272a58a9c78d5e1d | /networks/resnet_encoder.py | de62869855316ff5cfec6fd0958c60528db53a32 | [] | no_license | TWJianNuo/Stereo_SDNET | 83cb98adf083daae44382f8c0198683a672b5045 | e2a13984abb952b1083cbf06c51ffcc9d3fd511d | refs/heads/master | 2020-08-14T11:21:41.485309 | 2019-10-30T20:09:30 | 2019-10-30T20:09:30 | 215,158,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,254 | py | # Copyright Niantic 2019. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the Monodepth2 licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
import torch.nn as nn
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
class ResNetMultiImageInput(models.ResNet):
    """Constructs a resnet model with varying number of input images.
    Adapted from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
    """
    def __init__(self, block, layers, num_classes=1000, num_input_images=1, add_mask=0):
        super(ResNetMultiImageInput, self).__init__(block, layers)
        self.inplanes = 64
        # First conv takes 3 channels per stacked image plus `add_mask`
        # extra (non-RGB) channels.
        self.conv1 = nn.Conv2d(
            num_input_images * 3 + add_mask, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Standard ResNet stages (torchvision's _make_layer).
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Re-initialise weights (the custom conv1 above is not covered by
        # the parent's initialisation of the original conv1).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
def resnet_multiimage_input(num_layers, pretrained=False, num_input_images=1, add_mask=0):
    """Constructs a ResNet model.
    Args:
        num_layers (int): Number of resnet layers. Must be 18 or 50
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        num_input_images (int): Number of frames stacked as input
        add_mask (int): Number of extra (non-RGB) channels on the first conv
    """
    assert num_layers in [18, 50], "Can only run with 18 or 50 layer resnet"
    blocks = {18: [2, 2, 2, 2], 50: [3, 4, 6, 3]}[num_layers]
    block_type = {18: models.resnet.BasicBlock, 50: models.resnet.Bottleneck}[num_layers]
    model = ResNetMultiImageInput(block_type, blocks, num_input_images=num_input_images, add_mask=add_mask)
    if pretrained:
        loaded = model_zoo.load_url(models.resnet.model_urls['resnet{}'.format(num_layers)])
        # Average the pretrained RGB weights across the stacked input frames.
        loaded['conv1.weight'] = torch.cat([loaded['conv1.weight']] * num_input_images, 1) / num_input_images
        if add_mask > 0:
            # BUG FIX: the original sliced [:, -(add_mask + 1):-add_mask],
            # which always selects exactly one channel (and the wrong one),
            # so load_state_dict failed whenever add_mask > 1.  Initialise
            # the mask channels from the last `add_mask` channels of the
            # freshly (Kaiming) initialised conv1 instead.
            mask_init = model.conv1.weight[:, -add_mask:, :, :].clone()
            loaded['conv1.weight'] = torch.cat([loaded['conv1.weight'], mask_init], 1)
        model.load_state_dict(loaded)
    return model
class ResnetEncoder(nn.Module):
    """Pytorch module for a resnet encoder
    """
    def __init__(self, num_layers, pretrained, num_input_images=1, add_mask=0):
        super(ResnetEncoder, self).__init__()
        # Channel counts of the five feature scales returned by forward().
        self.num_ch_enc = np.array([64, 64, 128, 256, 512])
        resnets = {18: models.resnet18,
                   34: models.resnet34,
                   50: models.resnet50,
                   101: models.resnet101,
                   152: models.resnet152}
        if num_layers not in resnets:
            raise ValueError("{} is not a valid number of resnet layers".format(num_layers))
        # NOTE(review): `add_mask` only takes effect when stacking multiple
        # input images; with num_input_images == 1 it is silently ignored.
        if num_input_images > 1:
            self.encoder = resnet_multiimage_input(num_layers, pretrained, num_input_images, add_mask)
        else:
            self.encoder = resnets[num_layers](pretrained)
        if num_layers > 34:
            # Bottleneck blocks output 4x the channels of BasicBlock.
            self.num_ch_enc[1:] *= 4

    def forward(self, input_image):
        # Collect one feature map per scale (also kept on self.features).
        self.features = []
        # Normalise the input (single mean/std value for all channels).
        x = (input_image - 0.45) / 0.225
        x = self.encoder.conv1(x)
        x = self.encoder.bn1(x)
        self.features.append(self.encoder.relu(x))
        self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1])))
        self.features.append(self.encoder.layer2(self.features[-1]))
        self.features.append(self.encoder.layer3(self.features[-1]))
        self.features.append(self.encoder.layer4(self.features[-1]))
        return self.features
| [
"twjiannuo@gmail.com"
] | twjiannuo@gmail.com |
1b8d80d1332c15f78159e1c04c61c731bc87fb22 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/grains/cb2dd80523174feda773833adb8eed42.py | af33b9d4b60b65c2f8450d06dc4c77109a1c5eb3 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 171 | py | #grains.py
#Surely a bread winner
def on_square(square):
    """Grains on the given chessboard square (1-indexed): doubles each square."""
    return 2 ** (square - 1)


def total_after(number):
    """Total grains on squares 1 through `number` inclusive."""
    return sum(2 ** k for k in range(number))
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
2e257e7f0a607d965283c7f5fe7e047e6aebca51 | 3b4c8c8b2e9e372a6b5d3c2fe717a3cadab4abd0 | /stepler/nova/fixtures/hypervisor.py | c17b0bb7651f9d50c14f45c47db7bfb16df8227d | [] | no_license | Mirantis/stepler-draft | 242b25e116715c6550414826c7e5a3f212216833 | 2d85917ed9a35ee434d636fbbab60726d44af3a1 | refs/heads/master | 2021-05-01T01:02:49.670267 | 2016-12-01T10:39:37 | 2016-12-01T11:04:50 | 74,979,322 | 0 | 0 | null | 2020-02-26T12:07:21 | 2016-11-28T14:11:45 | Python | UTF-8 | Python | false | false | 1,714 | py | """
-------------------
Hypervisor fixtures
-------------------
"""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from stepler.nova import steps
__all__ = [
'hypervisor_steps',
'sorted_hypervisors',
]
@pytest.fixture
def hypervisor_steps(nova_client):
"""Fixture to get hypervisor steps.
Args:
nova_client (object): instantiated nova client
Returns:
stepler.nova.steps.HypervisorSteps: instantiated hypervisor steps
"""
return steps.HypervisorSteps(nova_client.hypervisors)
@pytest.fixture
def sorted_hypervisors(hypervisor_steps, flavor):
"""Function fixture to get hypervisors sorted by their capacity.
Args:
hypervisor_steps (obj): instantiated hypervisor steps
flavor (obj): nova flavor
Returns:
list: sorted hypervisors (from biggest to smallest)
"""
hypervisors = hypervisor_steps.get_hypervisors()
suitable_hypervisors = []
for hypervisor in hypervisors:
cap = hypervisor_steps.get_hypervisor_capacity(hypervisor, flavor)
suitable_hypervisors.append((cap, hypervisor))
hypervisors = [hyp for _, hyp in reversed(sorted(suitable_hypervisors))]
return hypervisors
| [
"g.dyuldin@gmail.com"
] | g.dyuldin@gmail.com |
e5cc77396f9b9773ee72ee68e409f7e3bcb4830c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_072/ch29_2019_08_22_16_43_24_439163.py | 7ddebc7b49971299f322b1c09507e3fa1c37b784 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | a=int(input('Salário: '))
def calcula_aumento(a):
y1=a*1.1
y2=a*1.15
if a>1250:
return 'Aumento de R$ {0:.2f}'.format(y1)
else:
return 'Aumento de R$ {0:.2f}'.format(y2)
print(calcula_aumento(a))
| [
"you@example.com"
] | you@example.com |
afbce37d5c69d3bef3b4fd803851db683dbf49c6 | 221cada2354556fbb969f25ddd3079542904ef5d | /Leetcode/264.py | 4a9b3389a6bab222a1a195d16eeeb4c9209d22ac | [] | no_license | syzdemonhunter/Coding_Exercises | 4b09e1a7dad7d1e3d4d4ae27e6e006732ffdcb1d | ca71572677d2b2a2aed94bb60d6ec88cc486a7f3 | refs/heads/master | 2020-05-24T11:19:35.019543 | 2019-11-22T20:08:32 | 2019-11-22T20:08:32 | 187,245,394 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | # https://leetcode.com/problems/ugly-number-ii/
# T: O(n)
# S: O(n)
class Solution:
    def nthUglyNumber(self, n: int) -> int:
        """Return the n-th ugly number (positive integers whose only prime
        factors are 2, 3 and 5), via the classic three-pointer DP."""
        ugly = [0] * n
        ugly[0] = 1  # the first ugly number (IndexError for n == 0, as before)
        i2 = i3 = i5 = 0
        for pos in range(1, n):
            # Next ugly number is the smallest unused multiple of 2, 3 or 5.
            nxt = min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)
            ugly[pos] = nxt
            # Advance every pointer that produced this value (handles
            # duplicates such as 2*3 == 3*2).
            if nxt == ugly[i2] * 2:
                i2 += 1
            if nxt == ugly[i3] * 3:
                i3 += 1
            if nxt == ugly[i5] * 5:
                i5 += 1
        return ugly[n - 1]
"syzuser60@gmail.com"
] | syzuser60@gmail.com |
b7fe6b8b53471259e84556736d55859f62eb1a81 | 9ea92648cb1bb9c3d91cee76d251981f1551d70a | /Multiplayer/Harrowland/HarrowlandAlt.py | 08745253a9f26c527a94864b225b075efa738c2c | [] | no_license | 1234hi1234/CodeCombat | 5c8ac395ac605e22b3b9501325c3eefbc3180500 | 585989200d965af952d1cdb8aca76e7f3db15c64 | refs/heads/master | 2020-03-15T02:39:07.979096 | 2018-03-12T14:23:29 | 2018-03-12T14:23:29 | 131,923,000 | 0 | 0 | null | 2018-05-03T01:05:43 | 2018-05-03T01:05:43 | null | UTF-8 | Python | false | false | 4,963 | py | enemy_types = {}
enemy_types['knight'] = {'danger': 100, 'focus': 100}
enemy_types['potion-master'] = {'danger': 100, 'focus': 100}
enemy_types['ranger'] = {'danger': 100, 'focus': 100}
enemy_types['trapper'] = {'danger': 100, 'focus': 100}
enemy_types['samurai'] = {'danger': 100, 'focus': 100}
enemy_types['librarian'] = {'danger': 100, 'focus': 100}
enemy_types['sorcerer'] = {'danger': 100, 'focus': 100}
enemy_types['hero-placeholder'] = {'danger': 99, 'focus': 100}
enemy_types['hero-placeholder-1'] = {'danger': 99, 'focus': 100}
enemy_types['hero-placeholder-2'] = {'danger': 99, 'focus': 100}
enemy_types['necromancer'] = {'danger': 100, 'focus': 100}
enemy_types['captain'] = {'danger': 100, 'focus': 100}
enemy_types['goliath'] = {'danger': 100, 'focus': 50}
enemy_types['captain'] = {'danger': 100, 'focus': 100}
enemy_types['forest-archer'] = {'danger': 100, 'focus': 50}
enemy_types['ninja'] = {'danger': 100, 'focus': 50}
enemy_types['soldier'] = {'danger': 90, 'focus': 50}
enemy_types['skeleton'] = {'danger': 90, 'focus': 50}
enemy_types['griffin-rider'] = {'danger': 99, 'focus': 50}
enemy_types['paladin'] = {'danger': 99, 'focus': 50}
enemy_types['burl'] = {'danger': 99, 'focus': 50}
enemy_types['archer'] = {'danger': 50, 'focus': 50}
def findTarget():
    # NOTE(review): dead code -- immediately shadowed by the second
    # findTarget() definition below, which adds a team check and a
    # nearest-enemy fallback.  Kept only for reference.
    # (dict attribute access like `.danger` works in CodeCombat's
    # transpiled runtime -- presumably not in plain Python.)
    danger = 0
    enemy_return = None
    for type in enemy_types.keys():
        if enemy_types[type] and enemy_types[type].danger > danger:
            enemy = hero.findNearest(hero.findByType(type))
            if enemy and hero.distanceTo(enemy) < enemy_types[type].focus:
                enemy_return = enemy
                danger = enemy_types[type].danger
    return enemy_return
def findTarget():
    """Pick the most dangerous hostile enemy within its type's focus range,
    falling back to the nearest enemy when none qualifies."""
    danger = 0
    enemy_return = None
    for type in enemy_types.keys():
        if enemy_types[type] and enemy_types[type].danger > danger:
            enemy = hero.findNearest(hero.findByType(type))
            # Only hostile units within that type's engagement distance.
            if enemy and enemy.team != hero.team and hero.distanceTo(enemy) < enemy_types[type].focus:
                enemy_return = enemy
                danger = enemy_types[type].danger
    if enemy_return is None:
        # Fallback: anything hostile at all.
        enemy_return = hero.findNearestEnemy()
    return enemy_return
def moveTo(position, fast=True):
    """Advance toward `position`, jumping when allowed and off cooldown."""
    if not position:
        return
    if hero.isReady("jump") and fast:
        hero.jumpTo(position)
    else:
        hero.move(position)
# Build rotation: six soldiers, then one paladin, repeating.
summonTypes = ['soldier','soldier','soldier','soldier','soldier','soldier','paladin']
def summonTroops():
    """Summon the next unit in the rotation when gold allows."""
    # hero.built counts prior summons, so it indexes the rotation.
    type = summonTypes[len(hero.built) % len(summonTypes)]
    if hero.gold > hero.costOf(type):
        hero.summon(type)
def commandTroops():
    """Dispatch every friendly unit to its role-specific handler."""
    for friend in hero.findFriends():
        kind = friend.type
        if kind == 'paladin':
            CommandPaladin(friend)
        elif kind in ('soldier', 'archer', 'griffin-rider', 'skeleton'):
            CommandSoldier(friend)
        elif kind == 'peasant':
            CommandPeasant(friend)
def CommandSoldier(soldier):
    """Keep combat units defending the hero's position."""
    hero.command(soldier, "defend", hero)
def CommandPeasant(soldier):
    """Send a peasant to collect its nearest item (coins etc.)."""
    item = soldier.findNearestItem()
    if item:
        hero.command(soldier, "move", item.pos)
def CommandPaladin(paladin):
    """Have a paladin heal the hero when hurt, otherwise defend.

    NOTE(review): `self` at top level is presumably CodeCombat's alias for
    the hero -- in plain Python this would raise NameError; confirm.
    """
    if (paladin.canCast("heal") and hero.health<hero.maxHealth):
        hero.command(paladin, "cast", "heal", self)
    else:
        hero.command(paladin, "defend", hero)
def pickUpNearestItem(items):
    """Walk (or jump) to the closest item in `items`, if any exists."""
    target = hero.findNearest(items)
    if target:
        moveTo(target.pos)
def attack():
    """Cast whatever is off cooldown, then engage the current target."""
    target = findTarget()
    if target:
        # Summons and self-buffs first -- these do not need the target.
        if hero.canCast('summon-burl', hero):
            hero.cast('summon-burl')
        if hero.canCast('summon-undead'):
            hero.cast('summon-undead')
        # BUGFIX: was `self`, which is only a CodeCombat alias for the hero
        # (an undefined name in standard Python).
        if hero.canCast('invisibility', hero):
            hero.cast('invisibility', hero)
        if hero.canCast('raise-dead'):
            hero.cast('raise-dead')
        # BUGFIX: corrected `trowRange`/`trow` typos -- the ability checked
        # with isReady is "throw".
        if hero.isReady("throw") and hero.distanceTo(target) < hero.throwRange:
            hero.throw(target)
        elif hero.canCast('poison-cloud', target):
            hero.cast('poison-cloud', target)
        elif hero.canCast('fear', target):
            hero.cast('fear', target)
        else:
            if hero.canCast('earthskin', hero):
                hero.cast('earthskin', hero)
            elif hero.canCast('chain-lightning', target):
                hero.cast('chain-lightning', target)
            elif hero.distanceTo(target) > 100:
                moveTo(target.pos)
            #elif hero.canCast('drain-life', target):
            #    hero.cast('drain-life', target)
            elif hero.isReady("attack"):
                hero.attack(target)
invis = -5  # timestamp marker for the invisibility window (see NOTE below)
while True:
    commandTroops()
    # NOTE(review): `invis` is never updated after initialization, so this
    # branch only holds for the first ~4 seconds of the match -- confirm
    # whether an invisibility cast was meant to refresh it.
    if hero.now() - invis < 4:
        items = hero.findItems()
        pickUpNearestItem(items)
    else:
        # BUGFIX: was `self`, a CodeCombat-only alias for the hero
        # (undefined name in standard Python).
        if hero.canCast('earthskin', hero):
            hero.cast('earthskin', hero)
    attack()
    summonTroops()
| [
"vadim-job-hg@yandex.ru"
] | vadim-job-hg@yandex.ru |
c45ce4740fa2a925305485c01cd487ebf775fb88 | 02c6653a60df3e6cfeab65f125cb6daccb7735be | /fjord/heartbeat/urls.py | 659f237fbb4075496422816ee9bd7b9cb172adb5 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | taranjeet/fjord | 35f628afd87ac16175367bb1f82a715c1ee7d0bb | 47d6380313d010a18621b4d344d01792003d04c9 | refs/heads/master | 2021-05-29T11:44:21.254707 | 2015-08-28T14:01:51 | 2015-08-28T14:01:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | from django.conf.urls import patterns, url
from .api_views import HeartbeatV2API
# Route POSTed heartbeat packets to the v2 API view.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 -- confirm the project's Django version before reuse.
urlpatterns = patterns(
    'fjord.heartbeat.views',
    url(r'^api/v2/hb/?$', HeartbeatV2API.as_view(), name='heartbeat-api')
)
| [
"willkg@mozilla.com"
] | willkg@mozilla.com |
719ec4f3c3e59c60171452530146d2221945d4ee | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/pytest/test_fibo.py | b5fb18f78c4551b40119f1a0286fb195b1b872b7 | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 356 | py | import fibo
def test_fibonacci_number():
    """Spot-check fibo.fibonacci_number for the first few indices."""
    assert fibo.fibonacci_number(1) == 1
    assert fibo.fibonacci_number(2) == 1
    assert fibo.fibonacci_number(3) == 2
    # NOTE(review): the 4th Fibonacci number is usually 3 (1, 1, 2, 3);
    # expecting 2 here may be a deliberately failing slide example -- confirm
    # against fibo.fibonacci_number before "fixing".
    assert fibo.fibonacci_number(4) == 2
def test_fibo():
    """Check fibo.fibonacci_list returns the first n Fibonacci numbers."""
    assert fibo.fibonacci_list(1) == [1]
    assert fibo.fibonacci_list(2) == [1, 1]
    assert fibo.fibonacci_list(3) == [1, 1, 2]
| [
"gabor@szabgab.com"
] | gabor@szabgab.com |
2bcaeeb5d8679c9c878aa8b7464b5a3b67e50e49 | ded10c2f2f5f91c44ec950237a59225e8486abd8 | /.history/2/matrix_squaring_20200412200806.py | 9ba361ee1a74e58d4d8e25e293a01152216bd14d | [] | no_license | jearistiz/Statistical-Physics-Projects | 276a86407b32ded4e06b32efb2fadbd8eff8daed | d9c5b16a50856e148dc8604d92b6de3ea21fc552 | refs/heads/master | 2022-11-05T03:41:23.623050 | 2020-06-28T06:36:05 | 2020-06-28T06:36:05 | 254,909,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,449 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from time import time
def rho_free(x,xp,beta):
    """Free-particle density-matrix element <x|exp(-beta*H)|xp> on an
    infinite line: a Gaussian kernel of width sqrt(beta)."""
    norm = 1.0 / np.sqrt(2.0 * np.pi * beta)
    spread = np.exp(-((x - xp) ** 2) / (2.0 * beta))
    return norm * spread
def harmonic_potential(x):
    """Harmonic-oscillator potential V(x) = x**2 / 2 (reduced units)."""
    return (x ** 2) / 2.0
def anharmonic_potential(x):
    """Anharmonic potential V(x) = x**2/2 - x**3 + x**4."""
    square = x ** 2
    return square / 2.0 - square * x + square * square
def QHO_canonical_ensemble(x,beta):
    """Quantum canonical-ensemble probability density of the harmonic
    oscillator in a heat bath.

    Receives:
        x: float -> position
        beta: float -> inverse temperature in reduced units, beta = 1/T.

    Returns:
        The theoretical quantum probability density at position x for the
        given temperature.
    """
    half_tanh = np.tanh(beta / 2.)
    return np.sqrt(half_tanh / np.pi) * np.exp(-half_tanh * x ** 2)
def rho_trotter(grid, beta, potential=harmonic_potential):
    """Density matrix in the high-temperature Trotter approximation.

    Receives:
        grid: list/array of N grid points
        beta: float -> inverse temperature in reduced units
        potential: callable -> interaction potential V(x)

    Returns:
        numpy.ndarray of shape (N, N) whose (xp, x) element is
        rho_free(x, xp, beta) * exp(-beta * (V(x) + V(xp)) / 2).
    """
    rows = []
    for xp in grid:
        row = [rho_free(x, xp, beta) * np.exp(-0.5 * beta * (potential(x) + potential(xp)))
               for x in grid]
        rows.append(row)
    return np.array(rows)
# Spatial grid: nx points spanning [-x_max, x_max] with spacing dx.
x_max = 5.
nx = 101
dx = 2. * x_max / (nx - 1)
grid_x = np.array([i*dx for i in range(-int((nx-1)/2), int(nx/2 + 1))])
# Matrix squaring: each squaring step doubles the inverse temperature beta.
N_beta = 2
beta_fin = 4
beta_ini = beta_fin/N_beta
# NOTE(review): the starting rho is built at beta_ini/N_beta; for N_beta
# doublings to land exactly on beta_fin the start should generally be
# beta_fin / 2**N_beta (it coincides only for N_beta == 2) -- confirm.
rho = rho_trotter(grid_x,beta_ini/N_beta,potential=harmonic_potential)
for i in range(N_beta):
    # rho(2*beta; x, x'') = sum_x' rho(beta; x, x') rho(beta; x', x'') * dx
    rho = np.dot(rho,rho)
    rho *= dx
    beta_ini *= 2.
    print('%d) beta: %.2E -> %.2E'%(i, beta_ini/2,beta_ini))
# checkpoint: trace(rho)=0 when N_beta>16 and nx~1000 or nx~100
# (translated) it seems the difference between the peaks is always constant
print (dx)
print(np.trace(rho))
# Normalize so the diagonal integrates to 1, then compare with theory.
rho_normalized = rho/(np.trace(rho)*dx)
weights = np.diag(rho_normalized)
plt.figure()
plt.plot(grid_x, weights, label = 'Matrix Convolution +\nTrotter formula')
plt.plot(grid_x, QHO_canonical_ensemble(grid_x,beta_fin), label=u'$\pi^{(Q)}(x;beta)$' )
plt.legend(title=u'$\\beta=%.2E$'%beta_fin)
plt.tight_layout()
plt.show()
plt.close()
| [
"jeaz.git@gmail.com"
] | jeaz.git@gmail.com |
ff7c686a74fe29a06e92a14b49df3fabc3d41990 | f9a5e7233875989f994438ce267907d8210d60a1 | /test/cylindrical_bands/metalearning/knn_ranking/RMSE/k=3/customised_set/cylinder_prediction_custom_AUCROC.py | 0d50ab65229407ef1d6da1dd984ef51326226d83 | [] | no_license | renoslyssiotis/When-are-Machine-learning-models-required-and-when-is-Statistics-enough | da8d53d44a69f4620954a32af3aacca45e1ed641 | 6af1670a74345f509c86b7bdb4aa0761c5b058ff | refs/heads/master | 2022-08-29T20:21:57.553737 | 2020-05-26T18:03:46 | 2020-05-26T18:03:46 | 256,439,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,210 | py | import sys, os, pickle
from pathlib import PurePath
current_dir = os.path.realpath(__file__)
p = PurePath(current_dir)
sys.path.append(str(p.parents[7])+'/metalearners/knn_ranking_method/RMSE')
from KNN_ranking_k_3_RMSE import KNN_ranking
#Load the selected meta-dataset after performing zero-variance threshold
with open(str(p.parents[7])+'/analysis/feature_selection/customised_set/customised_X_AUCROC_202.pickle', 'rb') as handle:
    metadataset_feature_selected = pickle.load(handle)
#=====================META-FEATURE EXTRACTION==================================
# Meta-features previously computed for the cylinder dataset.
with open(str(p.parents[5])+'/actual/cylinder_metafeatures_202.pickle', 'rb') as handle:
    meta_features = pickle.load(handle)
#nested_results is a nested dictionary with all the AUC-ROC performances for each dataset and all models
with open(str(p.parents[6])+'/nested_results_roc.pickle', 'rb') as handle:
    nested_results_roc = pickle.load(handle)
"""
Remove the meta-features which are not in the meta-dataset
(i.e. the features which have not been selected in the feature selection process)
"""
metafeatures_to_be_removed = []
for metafeature in meta_features.keys():
    if metafeature in metadataset_feature_selected.columns:
        pass
    else:
        metafeatures_to_be_removed.append(metafeature)
# NOTE(review): list comprehension used purely for its pop() side effect;
# a plain for-loop would be clearer.
[meta_features.pop(key) for key in metafeatures_to_be_removed]
#========================META-LEARNING: RANKING================================
#KNN Ranking Method
top1, top2, top3 = KNN_ranking(metadataset_feature_selected, meta_features, nested_results_roc)
print("==========================================")
print("                  AUC-ROC    ")
print("==========================================")
print("Top 1 predicted model: " + top1)
print("Top 2 predicted model: " + top2)
print("Top 3 predicted model: " + top3)
#Actual results
with open(str(p.parents[5])+'/actual/cylinder_top_3_roc.pickle', 'rb') as handle:
    actual_results = pickle.load(handle)
print("==========================================")
print("Top 1 ACTUAL model: " + actual_results[0])
print("Top 2 ACTUAL model: " + actual_results[1])
print("Top 3 ACTUAL model: " + actual_results[2])
| [
"rl554@cam.ac.uk"
] | rl554@cam.ac.uk |
bac2b87e56944a01d46105dc6f7838937e6a6398 | c380976b7c59dadaccabacf6b541124c967d2b5a | /.history/src/data/data_20191021145012.py | 878b2b9bced3b5913cb149f5d8e0ed6dc8bbab21 | [
"MIT"
] | permissive | bkraft4257/kaggle_titanic | b83603563b4a3c995b631e8142fe72e1730a0e2e | f29ea1773773109a867278c001dbd21a9f7b21dd | refs/heads/master | 2020-08-17T12:45:28.653402 | 2019-11-15T16:20:04 | 2019-11-15T16:20:04 | 215,667,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,836 | py | import pandas as pd
import numpy as np
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
    """Load the raw Titanic CSV and normalize its column names and dtypes."""
    def __init__(self, filename: Union[str, Path], age_bins=None, drop_columns=None):
        """Extract training data from a file or Path.

        Arguments:
            filename {str|Path} -- filename of the CSV data file.
            age_bins -- accepted but currently unused in this class.
            drop_columns -- columns to be dropped downstream (only stored
                here; the dropping happens in the transform step).
        """
        if drop_columns is None:
            drop_columns = ["age", "cabin", "name", "ticket"]
        self.filename = filename
        self.drop_columns = drop_columns
        # Label column(s) and the full set of raw feature columns.
        self.all_label_columns = ["survived"]
        self.all_feature_columns = [
            "pclass",
            "name",
            "sex",
            "age",
            "sibsp",
            "parch",
            "ticket",
            "fare",
            "cabin",
            "embarked",
        ]
        self.Xy_raw = None
        self.extract_raw()
    def extract_raw(self):
        """Read the CSV into ``self.Xy_raw``.

        Normalizes column names to snake_case, renames ``age`` to
        ``age_known``, makes ``pclass`` categorical, and indexes the frame
        by ``passengerid``.
        """
        Xy_raw = pd.read_csv(self.filename)
        Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
        Xy_raw = Xy_raw.rename(columns={"age": "age_known"})
        Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
        self.Xy_raw = Xy_raw.set_index("passengerid")
class TransformData:
    """Feature engineering for the Titanic data produced by ``ExtractData``.

    Works on a copy of ``raw_data.Xy_raw``; the constructor imputes missing
    fare/embarked values and extracts a title column.  The remaining
    transformations are exposed as methods (some pipeline steps are left
    commented out in the constructor).
    """
    # Map less common honorifics onto the canonical titles.
    title_translator = {
        "Mlle.": "Mrs.",
        "Mme.": "Mrs.",
        "Sir.": "Mr.",
        "Ms.": "Mrs.",
        "Rev.": "Mr.",
        "Col.": "Mr.",
        "Capt.": "Mr.",
        "Lady.": "Mrs.",
        "the Countess. of": "Mrs.",
    }
    def __init__(
        self,
        raw_data,
        adult_age_threshold_min=13,
        age_bins=None,
        fare_mode=None,
        embarked_mode=None,
        Xy_age_estimate=None,
        drop_columns=None,
    ):
        """Build the transformed frame from *raw_data*.

        Arguments:
            raw_data -- object exposing an ``Xy_raw`` pandas DataFrame
                (e.g. an ``ExtractData`` instance).
            adult_age_threshold_min {int} -- ages below this count as children.
            age_bins {list} -- bin edges intended for age binning.
            fare_mode / embarked_mode -- imputation values; derived from this
                dataset when None (pass the training-set values when
                transforming a holdout set).
            Xy_age_estimate {pd.DataFrame} -- precomputed per-group age
                estimates (see ``estimate_age``).
            drop_columns {list} -- columns removed by ``clean``.
        """
        if age_bins is None:
            age_bins = [0, 10, 20, 30, 40, 50, 60, np.inf]
        if drop_columns is None:
            drop_columns = ["age", "cabin", "name", "ticket"]
        self.raw = raw_data
        self.adult_age_threshold_min = adult_age_threshold_min
        self.Xy_age_estimate = Xy_age_estimate
        self.age_bins = age_bins
        # BUGFIX: drop_columns was previously computed but never stored,
        # which made clean() fail with AttributeError.
        self.drop_columns = drop_columns
        self.Xy = self.raw.Xy_raw.copy()
        if fare_mode is None:
            fare_mode = self.Xy["fare"].mode()[0]
        if embarked_mode is None:
            embarked_mode = self.Xy["embarked"].mode()[0]
        self.fare_mode = fare_mode
        self.embarked_mode = embarked_mode
        self.impute_missing_fare()
        self.impute_missing_embarked()
        self.extract_title()
        # Remaining pipeline steps, currently disabled:
        # self.extract_last_name()
        # self.extract_cabin_number()
        # self.extract_cabin_prefix()
        # self.estimate_age()
        # self.calc_age_bins()
        # self.calc_is_child()
        # self.calc_is_travelling_alone()
    def calc_is_travelling_alone(self):
        """Flag passengers with no siblings/spouse and no parents/children."""
        self.Xy["is_travelling_alone"] = (self.Xy.sibsp == 0) & (self.Xy.parch == 0)
    def calc_is_child(self):
        """Flag passengers younger than the adult age threshold."""
        self.Xy["is_child"] = self.Xy.age < self.adult_age_threshold_min
    def extract_cabin_number(self):
        """Extract the trailing digits of the ticket string."""
        self.Xy["cabin_number"] = self.Xy.ticket.str.extract(r"(\d+)$")
    def extract_cabin_prefix(self):
        """Extract the leading (pre-space) part of the ticket string."""
        self.Xy["cabin_prefix"] = self.Xy.ticket.str.extract(r"^(.+) ")
    def extract_title(self):
        """Extract title from the name using nameparser.

        If the title is empty it is filled with Mr or Mrs depending on sex,
        which is adequate for the train and holdout data sets.
        """
        title = (self.Xy.name.apply(lambda x: HumanName(x).title)
                 .replace(self.title_translator)
                 .replace({r"\.": ""}, regex=True)
                 .replace({"": np.nan})
                 .fillna(self.Xy['sex'])
                 .replace({'female': 'Mrs', 'male': 'Mr'})
                 )
        self.Xy["title"] = title
    def extract_last_name(self):
        """Extract the passenger's last name with nameparser."""
        self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)
    def calc_age_bins(self):
        """Bin ages by decade (note: hard-coded edges, not self.age_bins)."""
        self.Xy["age_bin"] = pd.cut(
            self.Xy.age, bins=[0, 10, 20, 30, 40, 50, 60, np.inf]
        )
    def clean(self):
        """Drop the configured unused columns from the raw frame.

        BUGFIX: this previously referenced ``self.Xy_raw`` (which does not
        exist on this class -- the raw frame lives on ``self.raw``) and the
        never-stored ``self.drop_columns`` attribute.
        """
        self.Xy = self.raw.Xy_raw.drop(self.drop_columns, axis=1)
    def estimate_age(self, groupby_columns=["sex", "title"]):
        """Fill missing ages with the per-group mean of known ages.

        Keyword Arguments:
            groupby_columns {list} -- grouping keys for the age estimate
                (default: {['sex', 'title']}); read-only, so the mutable
                default is safe here.
        """
        if self.Xy_age_estimate is None:
            self.Xy_age_estimate = (
                self.Xy.groupby(groupby_columns).age_known.mean().to_frame().round(1)
            )
            self.Xy_age_estimate = self.Xy_age_estimate.rename(
                columns={"age_known": "age_estimate"}
            )
        out_df = (
            self.Xy.reset_index()
            .merge(self.Xy_age_estimate, on=groupby_columns)
            .set_index("passengerid")
        )
        out_df["age"] = out_df["age_known"].fillna(out_df["age_estimate"])
        self.Xy = out_df
    def impute_missing_fare(self):
        """Replace missing fares with the stored mode."""
        self.Xy["fare"] = self.Xy["fare"].fillna(self.fare_mode)
    def impute_missing_embarked(self):
        """Replace missing embarkation ports with the stored mode."""
        self.Xy["embarked"] = self.Xy["embarked"].fillna(self.embarked_mode)
| [
"bob.kraft@infiniteleap.net"
] | bob.kraft@infiniteleap.net |
803e85beedc5941fec0a4790c7e8e2d6034d23c9 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/expert/5_1.py | 25b899efb6d6d0099e799bc80911fcb7704bc88c | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,079 | py | Optparse module in Python
The **optparse** module makes it easy to write command-line tools. It allows
argument parsing in Python programs.
  * **optparse** makes it easy to handle command-line arguments.
* It comes default with python.
* It allows dynamic data input to change the output
**Code: Creating an OptionParser object.**
## Python3
__
__
__
__
__
__
__
import optparse
parser = optparse.OptionParser()
---
__
__
**Defining options:**
Options should be added one at a time using **add_option()**. Each Option
instance represents a set of synonymous command-line option strings.
Ways to create an Option instance are:
> OptionParser. **add_option** ( _option_ )
>
> OptionParser. **add_option** ( _*opt_str, attr=value, .._.)
To define an option with only a short option string:
parser.add_option("-f", attr=value, ....)
And to define an option with only a long option string:
parser.add_option("--foo", attr=value, ....)
**Standard Option Actions:**
> * **“store”:** store this option’s argument (default).
> * **“store_const”:** store a constant value.
> * **“store_true”:** store True.
> * **“store_false”:** store False.
> * **“append”:** append this option’s argument to a list.
> * **“append_const”:** append a constant value to a list.
>
**Standard Option Attributes:**
> * **Option.action:** (default: “store”)
> * **Option.type:** (default: “string”)
> * **Option.dest:** (default: derived from option strings)
> * **Option.default:** The value to use for this option’s destination if
> the option is not seen on the command line.
>
###
Here’s an example of using optparse module in a simple script:
## Python3
__
__
__
__
__
__
__
# import OptionParser class
# from optparse module.
from optparse import OptionParser
# create a OptionParser
# class object
parser = OptionParser()
# add options
parser.add_option("-f", "--file",
dest = "filename",
help = "write report to FILE",
metavar = "FILE")
parser.add_option("-q", "--quiet",
action = "store_false",
dest = "verbose", default = True,
help = "don't print status messages to stdout")
(options, args) = parser.parse_args()
---
__
__
With these few lines of code, users of your script can now do the “usual
thing” on the command-line, for example:
<yourscript> --file=outfile -q
#### Let's understand it with an example:
**Code:** Writing python script for print table of n.
## Python3
__
__
__
__
__
__
__
# import optparse module
import optparse
# define a function for
# table of n
def table(n, dest_cheak):
for i in range(1,11):
tab = i*n
if dest_cheak:
print(tab)
return tab
# define a function for
# adding options
def Main():
# create OptionParser object
parser = optparse.OptionParser()
# add options
parser.add_option('-n', dest = 'num',
type = 'int',
help = 'specify the n''th table number to output')
parser.add_option('-o', dest = 'out',
type = 'string',
help = 'specify an output file (Optional)')
parser.add_option("-a", "--all",
action = "store_true",
dest = "prin",
default = False,
help = "print all numbers up to N")
(options, args) = parser.parse_args()
if (options.num == None):
print (parser.usage)
exit(0)
else:
number = options.num
# function calling
result = table(number, options.prin)
print ("The " + str(number)+ "th table is " +
str(result))
if (options.out != None):
# open a file in append mode
f = open(options.out,"a")
# write in the file
f.write(str(result) + '\n')
# Driver code
if __name__ == '__main__':
# function calling
Main()
---
__
__
**Output:**
python file_name.py -n 4

python file_name.py -n 4 -o

file.txt created

python file_name.py -n 4 -a

For knowing more about this module click here.
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
| [
"qmnguyenw@gmail.com"
] | qmnguyenw@gmail.com |
0557e253ffe377f396b357d6b0b6fbea9fdb29fd | d9f775ba2a46b8c95d68e61bea929810a226f4b8 | /src/investigation/analyze_threshold_neural_network_classifier.py | 7760b2058f7c74e5905913aa113c842b8fd4028c | [] | no_license | barium-project/qubit-reliability | 6e25f916fa2fcd2c7a078edc7045071ffae9e0bc | 902ef98e9a51fe3cf4413ccb79e90d44051a48b7 | refs/heads/master | 2022-12-13T20:02:02.826329 | 2020-06-23T04:14:41 | 2020-06-23T04:14:41 | 251,139,645 | 0 | 0 | null | 2022-12-08T09:43:15 | 2020-03-29T21:35:40 | Jupyter Notebook | UTF-8 | Python | false | false | 2,925 | py | from src.features.build_features import *
from src.models.threshold_classifiers import *
from src.visualization.visualize import *
from sklearn.model_selection import StratifiedKFold
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix
if __name__ == "__main__":
    # Load the artificial dataset together with its summary statistics.
    X, y, s = load_data('ARTIFICIAL_V4', stats=True)
    print(s)
    # Stratification key combining source-file membership, label and length.
    qubits_class = [
        (s['file_range']['./data/artificial/v4/dark_tags_by_trial_with_decay_MC.csv'][0] <= i
        and i <= s['file_range']['./data/artificial/v4/dark_tags_by_trial_with_decay_MC.csv'][1]) * 1000
        + y[i] * 100
        + len(X[i]) for i in range(len(X))]
    indices = list(StratifiedKFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED).split(X, qubits_class))
    pipeline = Pipeline([
        ("Histogramizer", Histogramizer(bins=11, range=(s['first_arrival'], s['last_arrival']))),
        ("Neural network", MLPClassifier(hidden_layer_sizes=(33, 33), activation='relu', solver='adam', max_iter=50, tol=0.001, verbose=True))]
    )
    for i in indices[:]:
        # Train neural network
        pipeline.fit(X[i[0]], y[i[0]])
        print(len(X[i[0]]))
        print(len(X[i[1]]))
        print()
        # Predict with threshold
        model = ThresholdCutoffClassifier(14)
        y_pred_threshold = model.predict(X[i[1]])
        dp_threshold = filter_datapoints(X[i[1]], y[i[1]], y_pred_threshold, indices=i[1])
        # Presumably the threshold's negative predictions (true + false
        # negatives) -- confirm against filter_datapoints' key semantics.
        i_n = list(dp_threshold['i_n']) + list(dp_threshold['i_fn'])
        print(len(dp_threshold['i_n']))
        print(len(dp_threshold['i_fn']))
        print(len(dp_threshold['i_p']))
        print(len(dp_threshold['i_fp']))
        print((float(len(dp_threshold['i_p'])) + len(dp_threshold['i_n'])) / len(X[i[1]]))
        print()
        # Predict with neural network
        y_pred_neural_network_all = pipeline.predict(X[i[1]])
        dp_neural_network_all = filter_datapoints(X[i[1]], y[i[1]], y_pred_neural_network_all, indices=i[1])
        print(len(dp_neural_network_all['i_n']))
        print(len(dp_neural_network_all['i_fn']))
        print(len(dp_neural_network_all['i_p']))
        print(len(dp_neural_network_all['i_fp']))
        print((float(len(dp_neural_network_all['i_p'])) + len(dp_neural_network_all['i_n'])) / len(X[i[1]]))
        print()
        # Predict with the brights of the threshold using the neural network
        y_pred_neural_network = pipeline.predict(X[i_n])
        dp_neural_network = filter_datapoints(X[i_n], y[i_n], y_pred_neural_network, indices=i_n)
        print(len(dp_neural_network['i_n']))
        print(len(dp_neural_network['i_fn']))
        print(len(dp_neural_network['i_p']))
        print(len(dp_neural_network['i_fp']))
        print((float(len(dp_threshold['i_p'])) + len(dp_neural_network['i_n']) + len(dp_neural_network['i_p'])) / len(X[i[1]]))
| [
"quentintruong@users.noreply.github.com"
] | quentintruong@users.noreply.github.com |
014809a20edc47b0f80c3f630ec3cc2a6744ad2c | ed37c6acf35ad8dfa7064c7d304f046c3657cb7a | /leetcode/google_prep/interview_process/1_unique_email.py | dcc4e4af6a4e737bc580bcd1b42c12bd29134c15 | [] | no_license | etture/algorithms_practice | 7b73753f5d579b7007ddd79f9a73165433d79b13 | ba398a040d2551b34f504ae1ce795e8cd5937dcc | refs/heads/master | 2021-11-18T20:37:11.730912 | 2021-10-03T23:25:24 | 2021-10-03T23:25:24 | 190,863,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,724 | py | # Basic imports --------------------------------------------
from __future__ import annotations
import sys
sys.setrecursionlimit(10**6)
from os.path import dirname, abspath, basename, normpath
root = abspath(__file__)
while basename(normpath(root)) != 'algo_practice':
root = dirname(root)
sys.path.append(root)
from utils.Tester import Tester, Logger
logger = Logger(verbose=False)
# ----------------------------------------------------------
def numUniqueEmails(emails: List[str]) -> int:
    """Count the number of distinct normalized email addresses.

    Normalization (LeetCode 929): within the local name, everything after
    the first '+' is ignored and '.' characters are removed; the domain is
    kept verbatim.  Leftover debug prints from the original were removed.
    """
    normalized = set()
    for email in emails:
        local_name, domain_name = email.split('@')
        # Drop the '+tag' suffix, then strip dots from the local part.
        local_name = local_name.split('+')[0].replace('.', '')
        normalized.add(local_name + '@' + domain_name)
    return len(normalized)
'''메인 실행 코드 -- DO NOT TOUCH BELOW THIS LINE'''
# Test cases (translated from Korean): Tuple[0] is the input args,
# Tuple[1] is the expected output.
test_cases = [
    ([[
        "test.email+alex@leetcode.com",
        "test.e.mail+bob.cathy@leetcode.com",
        "testemail+david@lee.tcode.com"
    ]], 2),
    ([["test.email+alex@leetcode.com", "test.email@leetcode.com"]], 1)
]
if __name__ == '__main__':
    Tester.factory(
        test_cases,
        func=lambda input: numUniqueEmails(input[0])
    ).run()
| [
"etture@gmail.com"
] | etture@gmail.com |
733fe640fa3867b0fcb26f19fc4edab1eeb4e217 | bdb3716c644b8d031af9a5285626d7ccf0ecb903 | /code/UI/OpenAPI/python-flask-server/openapi_server/test/test_translate_controller.py | 0c41ea731f2ae0627548d0524755aeed0c091282 | [
"MIT",
"Apache-2.0"
] | permissive | RTXteam/RTX | 97d2a8946d233d48cc1b165f5e575af21bda4b26 | ed0693dd03149e56f7dfaf431fb8a82ace0c4ef3 | refs/heads/master | 2023-09-01T21:48:49.008407 | 2023-09-01T20:55:06 | 2023-09-01T20:55:06 | 111,240,202 | 43 | 31 | MIT | 2023-09-14T16:20:01 | 2017-11-18T21:19:13 | Python | UTF-8 | Python | false | false | 1,003 | py | # coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from openapi_server.models.query import Query # noqa: E501
from openapi_server.test import BaseTestCase
class TestTranslateController(BaseTestCase):
    """TranslateController integration test stubs"""
    def test_translate(self):
        """Test case for translate

        Translate natural language question into a standardized query.
        NOTE(review): the stub posts an empty (None) JSON body and only
        asserts HTTP 200; it does not validate the translated payload.
        """
        request_body = None
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
        }
        response = self.client.open(
            '/api/arax/v1/translate',
            method='POST',
            headers=headers,
            data=json.dumps(request_body),
            content_type='application/json')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
    unittest.main()
| [
"edeutsch@systemsbiology.org"
] | edeutsch@systemsbiology.org |
8096d340b32c90ac60bfa05eba86ddb281b636b5 | a4e2b2fa5c54c7d43e1dbe4eef5006a560cd598e | /django_silky/example_app/gen.py | 288d35821568f40c6ca93eece2162dab74d0f8b1 | [
"MIT"
] | permissive | joaofrancese/silk | baa9fc6468351ec34bc103abdbd1decce0ae2f5d | d8de1367eb70f4405f4ae55d9286f0653c5b3189 | refs/heads/master | 2023-04-01T07:30:42.707427 | 2017-02-22T14:06:05 | 2017-02-22T14:06:05 | 23,427,190 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | """generate fake data for the example app"""
from example_app.models import Blind
__author__ = 'mtford'
def main():
    """Insert the example Blind rows used by the example app."""
    # The created objects are not used afterwards, so no need to bind them
    # to (previously unused) local variables.
    Blind.objects.create(name='Venetian', child_safe=False)
    Blind.objects.create(name='Roller', child_safe=True)
if __name__ == '__main__':
    main()
"mtford@gmail.com"
] | mtford@gmail.com |
4a78028471cb49bb731b246301644655ed79c5c4 | 9d1491368c5e87760131ba27d252ee2d10620433 | /gammapy/time/tests/test_simulate.py | 8d7abfbdc73cb8e1bad04343c6359981797ac35e | [
"BSD-3-Clause"
] | permissive | cnachi/gammapy | f9295306a8e81d0b7f4d2111b3fa3679a78da3f7 | 3d3fc38c111d2f490d984082750f8003580fe06c | refs/heads/master | 2021-01-20T23:37:59.409914 | 2016-06-09T08:36:33 | 2016-06-09T08:36:33 | 60,764,807 | 0 | 0 | null | 2016-06-09T09:55:54 | 2016-06-09T09:55:54 | null | UTF-8 | Python | false | false | 716 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_almost_equal
from astropy.units import Quantity
from ..simulate import make_random_times_poisson_process as random_times
def test_make_random_times_poisson_process():
    """Regression test: a fixed seed yields reproducible event times."""
    time = random_times(size=10,
                        rate=Quantity(10, 'Hz'),
                        dead_time=Quantity(0.1, 'second'),
                        random_state=0)
    # Every sampled value must respect the configured dead time.
    assert np.min(time) >= Quantity(0.1, 'second')
    # Regression values recorded for random_state=0.
    assert_almost_equal(time[0].sec, 0.179587450816311)
    assert_almost_equal(time[-1].sec, 0.14836021009022532)
| [
"Deil.Christoph@gmail.com"
] | Deil.Christoph@gmail.com |
5057929f5ddc777fbf9c98216d967df1d3d54632 | 9ce80fd45e0a2a321d9285be1998133405c8cf11 | /meiduo_mall/celery_tasks/main.py | a425e9e0f64d30b876eb768fa35d6da7e31a20e8 | [
"MIT"
] | permissive | dingmingren/meiduo_project | db7d48acf2f8ad5612fe89370d9ed6633c4ac60a | 51497064e8dd24fb45f4d39ca6ed9ae623b8cb79 | refs/heads/master | 2020-07-05T04:46:55.121739 | 2019-08-27T13:00:34 | 2019-08-27T13:00:34 | 202,483,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | #1、导包
# 1. Imports
from celery import Celery
# 2. Load the project's Django settings module
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "meiduo_mall.settings.dev")
# 3. Instantiate the Celery application
app = Celery('celery_tasks')
# 4. Load the Celery configuration file
app.config_from_object('celery_tasks.config')
# 5. Register task modules with Celery
# SMS and email tasks
app.autodiscover_tasks(['celery_tasks.sms','celery_tasks.email'])
# Email task (superseded by the combined call above)
# app.autodiscover_tasks(['celery_tasks.email'])
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
55890962eb32cf417864520b10bf305af50f5049 | e8abc7c32f7c1be189b93345f63d7f8f03e40b0f | /examples/launch_tor_endpoint2.py | 1adc178bf28d33da5fb8cf389ea1cccb5f45e3a5 | [
"MIT"
] | permissive | david415/txtorcon | 8e37a71a7fce678b4d7646d8b22df622b7508452 | 3d7ad0d377f3344a33c7d67a19717cc3674bed6a | refs/heads/192.fix_build_timeout_circuit.0 | 2021-01-17T17:08:52.059542 | 2016-11-22T20:34:21 | 2016-11-22T20:34:21 | 19,297,689 | 3 | 1 | MIT | 2018-04-27T18:29:52 | 2014-04-30T01:52:53 | Python | UTF-8 | Python | false | false | 1,435 | py | #!/usr/bin/env python
# Here we set up a Twisted Web server and then launch a slave tor
# with a configured hidden service directed at the Web server we set
# up. This uses serverFromString to translate the "onion" endpoint descriptor
# into a TCPHiddenServiceEndpoint object...
import shutil
from twisted.internet import reactor
from twisted.web import server, resource
from twisted.internet.endpoints import serverFromString
import txtorcon
class Simple(resource.Resource):
    """Minimal Twisted Web resource served behind the hidden service."""
    # Leaf resource: render this node for every request path.
    isLeaf = True
    def render_GET(self, request):
        """Return a static HTML body for any GET request."""
        return "<html>Hello, world! I'm a hidden service!</html>"
# Wrap the resource in an HTTP factory that the endpoints can listen with.
site = server.Site(Simple())
def setup_failed(arg):
    # Errback: called when a hidden-service endpoint could not be set up.
    print "SETUP FAILED", arg
def setup_complete(port):
local = txtorcon.IHiddenService(port).local_address.getHost()
print "Hidden serivce:", port.getHost()
print " locally at:", local
def progress(percent, tag, message):
    """Render a 10-segment progress bar for Tor bootstrap updates."""
    bar = int(percent / 10)
    print '[%s%s] %s' % ('#' * bar, '.' * (10 - bar), message)
# Two hidden-service endpoints on virtual port 80, parsed from "onion"
# endpoint descriptor strings.
hs_endpoint1 = serverFromString(reactor, "onion:80")
hs_endpoint2 = serverFromString(reactor, "onion:80")
# Report Tor launch progress for both endpoints.
txtorcon.IProgressProvider(hs_endpoint1).add_progress_listener(progress)
txtorcon.IProgressProvider(hs_endpoint2).add_progress_listener(progress)
# listen() returns Deferreds that fire once the services are published.
d1 = hs_endpoint1.listen(site)
d2 = hs_endpoint2.listen(site)
d1.addCallback(setup_complete).addErrback(setup_failed)
d2.addCallback(setup_complete).addErrback(setup_failed)
reactor.run()
| [
"meejah@meejah.ca"
] | meejah@meejah.ca |
55f70e257d55919419c13c52d587546cf180076f | 8534f1109cbd6bdae8e5110e2438331ded6f1134 | /lnotab.py | f25e08d12bc38d29a9dd4ce4e5d27289fe98cc73 | [] | no_license | laike9m/TestPython | dc802053fd0eee329aca8517ccd6f2e97846f221 | 0587c33764b8f88975d1156b73400926e77014c6 | refs/heads/master | 2022-12-21T12:29:45.570030 | 2022-01-05T06:13:32 | 2022-01-05T06:13:32 | 195,931,990 | 0 | 0 | null | 2022-12-13T23:41:15 | 2019-07-09T04:21:59 | Python | UTF-8 | Python | false | false | 293 | py | """A doc string
foo
"""
import sys
import ast
import astor
import inspect
# Inspect the module-level frame's code object.
frame = sys._getframe()
# co_lnotab: packed bytecode-offset -> line-number table (deprecated in
# favour of co_lines() since Python 3.10 / PEP 626).
print(frame.f_code.co_lnotab)
print(frame.f_code.co_firstlineno)
def f():
    # Same inspection from inside a function frame, for comparison.
    x = 1
    frame = sys._getframe()
    print(frame.f_code.co_lnotab)
    print(frame.f_code.co_firstlineno)
f()
| [
"laike9m@gmail.com"
] | laike9m@gmail.com |
5d74f273cd83e256d34521b9117230ddc8efebbf | 947e71b34d21f3c9f5c0a197d91a880f346afa6c | /ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py | cf56a7fdb9f8a91aba6d59d4588a583b21c64f94 | [
"MIT",
"Apache-2.0",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"OFL-1.1",
"MS-PL",
"AFL-2.1",
"GPL-2.0-only",
"Python-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | liuwenru/Apache-Ambari-ZH | 4bc432d4ea7087bb353a6dd97ffda0a85cb0fef0 | 7879810067f1981209b658ceb675ac76e951b07b | refs/heads/master | 2023-01-14T14:43:06.639598 | 2020-07-28T12:06:25 | 2020-07-28T12:06:25 | 223,551,095 | 38 | 44 | Apache-2.0 | 2023-01-02T21:55:10 | 2019-11-23T07:43:49 | Java | UTF-8 | Python | false | false | 5,613 | py | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
import os.path
from resource_management.core.resources.system import Directory, Execute, File
from resource_management.core.resources.service import ServiceConfig
from resource_management.core.source import InlineTemplate, StaticFile
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.functions.constants import StackFeature
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.stack_features import check_stack_feature
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
from ambari_commons import OSConst
from ambari_commons.constants import SERVICE
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def webhcat():
import params
XmlConfig("webhcat-site.xml",
conf_dir=params.hcat_config_dir,
configurations=params.config['configurations']['webhcat-site']
)
# Manually overriding service logon user & password set by the installation package
ServiceConfig(params.webhcat_server_win_service_name,
action="change_user",
username = params.webhcat_user,
password = Script.get_password(params.webhcat_user))
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def webhcat():
import params
Directory(params.templeton_pid_dir,
owner=params.webhcat_user,
mode=0755,
group=params.user_group,
create_parents = True)
Directory(params.templeton_log_dir,
owner=params.webhcat_user,
mode=0755,
group=params.user_group,
create_parents = True)
Directory(params.config_dir,
create_parents = True,
owner=params.webhcat_user,
group=params.user_group,
cd_access="a")
# Replace _HOST with hostname in relevant principal-related properties
webhcat_site = params.config['configurations']['webhcat-site'].copy()
for prop_name in ['templeton.hive.properties', 'templeton.kerberos.principal']:
if prop_name in webhcat_site:
webhcat_site[prop_name] = webhcat_site[prop_name].replace("_HOST", params.hostname)
XmlConfig("webhcat-site.xml",
conf_dir=params.config_dir,
configurations=webhcat_site,
configuration_attributes=params.config['configurationAttributes']['webhcat-site'],
owner=params.webhcat_user,
group=params.user_group,
)
# if we're in an upgrade of a secure cluster, make sure hive-site and yarn-site are created
if check_stack_feature(StackFeature.CONFIG_VERSIONING, params.stack_version_formatted_major) and \
params.version and params.stack_root:
XmlConfig("hive-site.xml",
conf_dir = format("{stack_root}/{version}/hive/conf"),
configurations = params.config['configurations']['hive-site'],
configuration_attributes = params.config['configurationAttributes']['hive-site'],
owner = params.hive_user,
group = params.user_group,
)
XmlConfig("yarn-site.xml",
conf_dir = format("{stack_root}/{version}/hadoop/conf"),
configurations = params.config['configurations']['yarn-site'],
configuration_attributes = params.config['configurationAttributes']['yarn-site'],
owner = params.yarn_user,
group = params.user_group,
)
File(format("{config_dir}/webhcat-env.sh"),
owner=params.webhcat_user,
group=params.user_group,
content=InlineTemplate(params.webhcat_env_sh_template)
)
Directory(params.webhcat_conf_dir,
cd_access='a',
create_parents = True
)
log4j_webhcat_filename = 'webhcat-log4j.properties'
if (params.log4j_webhcat_props != None):
File(format("{config_dir}/{log4j_webhcat_filename}"),
mode=0644,
group=params.user_group,
owner=params.webhcat_user,
content=InlineTemplate(params.log4j_webhcat_props)
)
elif (os.path.exists("{config_dir}/{log4j_webhcat_filename}.template")):
File(format("{config_dir}/{log4j_webhcat_filename}"),
mode=0644,
group=params.user_group,
owner=params.webhcat_user,
content=StaticFile(format("{config_dir}/{log4j_webhcat_filename}.template"))
)
# Generate atlas-application.properties.xml file
if params.enable_atlas_hook:
# WebHCat uses a different config dir than the rest of the daemons in Hive.
atlas_hook_filepath = os.path.join(params.config_dir, params.atlas_hook_filename)
setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
| [
"ijarvis@sina.com"
] | ijarvis@sina.com |
fc4acfc5e6a1ae4758a0853857d51c68969e09ca | 6e95e9b6a1fc996ebcb46c44d4ef7678f762e4f7 | /others/taobao_spider/test/service/test_job.py | c68884724f84ce028a0e969f4244394ba839f79e | [
"Apache-2.0",
"Unlicense"
] | permissive | 625781186/lgd_spiders | 3a4d6917a01e446136e7aef4c92b9b7a1f8e498d | 1c8680115beb42f4daaf6be71bf3fb14fcc2c255 | refs/heads/master | 2020-08-29T13:21:12.116395 | 2019-10-21T14:28:00 | 2019-10-21T14:28:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # -*- coding: utf-8 -*-
from unittest import TestCase
from mall_spider.spiders.actions.action_service import get_action_service
from mall_spider.utils.date_util import today, yesterday
class TestJob(TestCase):
__action_service = get_action_service()
def test_execute_sycm_category_job_init_actions(self):
# date_str = yesterday().strftime("%Y-%m-%d")
date_str = yesterday().strftime("2019-01-29")
self.__action_service.execute_sycm_category_job_init_actions(date_str)
| [
"lgdupup"
] | lgdupup |
d08e4918619a53ca615232233decab9bb16ac9bb | 155cbccc3ef3b8cba80629f2a26d7e76968a639c | /thelma/repositories/rdb/mappers/rackposition.py | f42d135cfc7afb84a57fc4997a6efa5274120fc8 | [
"MIT"
] | permissive | papagr/TheLMA | 1fc65f0a7d3a4b7f9bb2d201259efe5568c2bf78 | d2dc7a478ee5d24ccf3cc680888e712d482321d0 | refs/heads/master | 2022-12-24T20:05:28.229303 | 2020-09-26T13:57:48 | 2020-09-26T13:57:48 | 279,159,864 | 1 | 0 | MIT | 2020-07-12T22:40:36 | 2020-07-12T22:40:35 | null | UTF-8 | Python | false | false | 903 | py | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Rack position mapper.
"""
from sqlalchemy import func
from everest.repositories.rdb.utils import mapper
from thelma.entities.rack import RackPosition
__docformat__ = "reStructuredText en"
__all__ = ['create_mapper']
def create_mapper(rack_position_tbl):
"Mapper factory."
m = mapper(RackPosition, rack_position_tbl,
id_attribute='rack_position_id',
slug_expression=lambda cls: func.lower(cls._label), # pylint: disable=W0212
properties=
dict(_label=rack_position_tbl.c.label,
_row_index=rack_position_tbl.c.row_index,
_column_index=rack_position_tbl.c.column_index
),
)
return m
| [
"fogathmann@gmail.com"
] | fogathmann@gmail.com |
4fbd0fc8fdb23a67806ad582e85e7afbddace103 | c16aee56666e7da3532e9dd8cd065e61f25b7e50 | /week1/triangle.py | 3c5049536f1d82082cf9d94959e30383db4ee20b | [] | no_license | loganmurphy/DC_2017_Python3 | 8e6af1f9db2a927242b1bf0f8f9104eb3b32ec86 | 1a0b4b4b37537c354025dd9318d8d14b27791e87 | refs/heads/master | 2021-07-20T08:51:45.625656 | 2017-10-27T03:33:12 | 2017-10-27T03:33:12 | 103,613,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | height = 4
width = 7
star = "*"
space = " "
i = 0
while i < height:
print((space * width) + star)
i += 1
width -= 1
star = (2 * "*") + star
| [
"loganmurphy1984@gmail.com"
] | loganmurphy1984@gmail.com |
d73734d96cd230ceb5c9108f714f3714d54bf033 | 83277e8b959de61b655f614b7e072394a99d77ae | /venv/bin/pip3.7 | 004fc1d5fa6738f54b95ef6aeeedc8bac6a822c9 | [
"MIT"
] | permissive | hskang9/scalable-django | b3ed144670c3d5b244168fdd38f33e1f596253c0 | 162e0f4a3d49f164af1d33298fa9a47b66508cbf | refs/heads/master | 2023-04-29T05:33:23.460640 | 2020-03-27T00:55:28 | 2020-03-27T00:55:28 | 247,036,359 | 2 | 1 | MIT | 2023-04-21T20:53:08 | 2020-03-13T09:40:37 | Python | UTF-8 | Python | false | false | 431 | 7 | #!/Users/hyungsukkang/PycharmProjects/django_graphql_container/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| [
"hyungsukkang@Hyungsuks-Mac-mini.local"
] | hyungsukkang@Hyungsuks-Mac-mini.local |
6d933276903d9ffc7b771f5c55d622336661f483 | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/site-packages/soupsieve/css_parser.py | 8616d61b56571f69356cc441403dd95d3f6a796b | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:ec6cf659d2ea0ac4f743c1c6502d5f376a92d2077bb63b5d79914e584458b656
size 43200
| [
"aqp1234@naver.com"
] | aqp1234@naver.com |
e2c7f0a8cc38b781fef073c5f1e0fc43c09ee60d | 5d28c38dfdd185875ba0edaf77281e684c81da0c | /mlflow/models/evaluation/lift_curve.py | de722164777e792d3be576fca7c1622fd425264d | [
"Apache-2.0"
] | permissive | imrehg/mlflow | 3a68acc1730b3ee6326c1366760d6ddc7e66099c | 5ddfe9a1b48e065540094d83125040d3273c48fa | refs/heads/master | 2022-09-24T05:39:02.767657 | 2022-09-20T00:14:07 | 2022-09-20T00:14:07 | 244,945,486 | 1 | 0 | Apache-2.0 | 2020-03-04T16:11:54 | 2020-03-04T16:11:53 | null | UTF-8 | Python | false | false | 6,224 | py | import matplotlib.pyplot as plt
import numpy as np
def _cumulative_gain_curve(y_true, y_score, pos_label=None):
"""
This method is copied from scikit-plot package.
See https://github.com/reiinakano/scikit-plot/blob/2dd3e6a76df77edcbd724c4db25575f70abb57cb/scikitplot/helpers.py#L157
This function generates the points necessary to plot the Cumulative Gain
Note: This implementation is restricted to the binary classification task.
Args:
y_true (array-like, shape (n_samples)): True labels of the data.
y_score (array-like, shape (n_samples)): Target scores, can either be
probability estimates of the positive class, confidence values, or
non-thresholded measure of decisions (as returned by
decision_function on some classifiers).
pos_label (int or str, default=None): Label considered as positive and
others are considered negative
Returns:
percentages (numpy.ndarray): An array containing the X-axis values for
plotting the Cumulative Gains chart.
gains (numpy.ndarray): An array containing the Y-axis values for one
curve of the Cumulative Gains chart.
Raises:
ValueError: If `y_true` is not composed of 2 classes. The Cumulative
Gain Chart is only relevant in binary classification.
"""
y_true, y_score = np.asarray(y_true), np.asarray(y_score)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if pos_label is None and not (
np.array_equal(classes, [0, 1])
or np.array_equal(classes, [-1, 1])
or np.array_equal(classes, [0])
or np.array_equal(classes, [-1])
or np.array_equal(classes, [1])
):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.0
# make y_true a boolean vector
y_true = y_true == pos_label
sorted_indices = np.argsort(y_score)[::-1]
y_true = y_true[sorted_indices]
gains = np.cumsum(y_true)
percentages = np.arange(start=1, stop=len(y_true) + 1)
gains = gains / float(np.sum(y_true))
percentages = percentages / float(len(y_true))
gains = np.insert(gains, 0, [0])
percentages = np.insert(percentages, 0, [0])
return percentages, gains
def plot_lift_curve(
y_true,
y_probas,
title="Lift Curve",
ax=None,
figsize=None,
title_fontsize="large",
text_fontsize="medium",
pos_label=None,
):
"""
This method is copied from scikit-plot package.
See https://github.com/reiinakano/scikit-plot/blob/2dd3e6a76df77edcbd724c4db25575f70abb57cb/scikitplot/metrics.py#L1133
Generates the Lift Curve from labels and scores/probabilities
The lift curve is used to determine the effectiveness of a
binary classifier. A detailed explanation can be found at
http://www2.cs.uregina.ca/~dbd/cs831/notes/lift_chart/lift_chart.html.
The implementation here works only for binary classification.
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_probas (array-like, shape (n_samples, n_classes)):
Prediction probabilities for each class returned by a classifier.
title (string, optional): Title of the generated plot. Defaults to
"Lift Curve".
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the learning curve. If None, the plot is drawn on a new set of
axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
pos_label (optional): Label for the positive class.
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> lr = LogisticRegression()
>>> lr = lr.fit(X_train, y_train)
>>> y_probas = lr.predict_proba(X_test)
>>> plot_lift_curve(y_test, y_probas)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_lift_curve.png
:align: center
:alt: Lift Curve
"""
y_true = np.array(y_true)
y_probas = np.array(y_probas)
classes = np.unique(y_true)
if len(classes) != 2:
raise ValueError(
"Cannot calculate Lift Curve for data with {} category/ies".format(len(classes))
)
# Compute Cumulative Gain Curves
percentages, gains1 = _cumulative_gain_curve(y_true, y_probas[:, 0], classes[0])
percentages, gains2 = _cumulative_gain_curve(y_true, y_probas[:, 1], classes[1])
percentages = percentages[1:]
gains1 = gains1[1:]
gains2 = gains2[1:]
gains1 = gains1 / percentages
gains2 = gains2 / percentages
if ax is None:
_, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
label0 = "Class {}".format(classes[0])
label1 = "Class {}".format(classes[1])
# show (positive) next to the positive class in the legend
if pos_label:
if pos_label == classes[0]:
label0 = "Class {} (positive)".format(classes[0])
elif pos_label == classes[1]:
label1 = "Class {} (positive)".format(classes[1])
# do not mark positive class if pos_label is not in classes
ax.plot(percentages, gains1, lw=3, label=label0)
ax.plot(percentages, gains2, lw=3, label=label1)
ax.plot([0, 1], [1, 1], "k--", lw=2, label="Baseline")
ax.set_xlabel("Percentage of sample", fontsize=text_fontsize)
ax.set_ylabel("Lift", fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.grid("on")
ax.legend(loc="best", fontsize=text_fontsize)
return ax
| [
"noreply@github.com"
] | imrehg.noreply@github.com |
37ab98f9f1a3c980df3099c0873f46050d0682e3 | 3300f61798909af363504e577bfd7677e5239e8e | /docstrings/verify.py | f83fdf1827a069f56d0a89166016d69ec1ed8e39 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | jctanner/ansible-tests | 11c6ed23ef8be5a42401d1c93c86c2693068f8de | c0a2b2aff6002b98088ceb04e435e7a95f78d655 | refs/heads/master | 2020-04-07T10:23:13.031694 | 2014-04-08T02:02:15 | 2014-04-08T02:02:15 | 13,967,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,743 | py | #!/usr/bin/env python
# http://stackoverflow.com/questions/1714027/version-number-comparison
import os
import sys
import ast
import subprocess
import shlex
from distutils.version import LooseVersion
from pkg_resources import parse_version
import cPickle as pickle
docscript = """#!/usr/bin/env python
import os
import sys
from ansible import utils
from ansible.utils import module_docs
module_name = sys.argv[1]
try:
module_path = utils.plugins.module_finder.find_plugin(module_name)
except:
print None
sys.exit(1)
doc = None
try:
doc, plainexamples = module_docs.get_docstring(module_path)
except AssertionError, e:
pass
except SyntaxError, e:
pass
except Exception, e:
pass
if not doc:
sys.exit(1)
else:
print doc
"""
class Checkout(object):
def __init__(self, repo_url, branch='devel', tmp_path='/tmp'):
self.repo_url = repo_url
self.tmp_path = tmp_path
self.branch = branch
self.git = subprocess.check_output(['which', 'git'])
self.git = self.git.strip()
parts = repo_url.split('/')
self.repo_user = parts[-2]
self.repo_name = parts[-1]
self.repo_dir = self.repo_user + "_" + self.repo_name + "_" + branch
self.repo_path = os.path.join(self.tmp_path, self.repo_dir)
def makecheckout(self):
if not os.path.isdir(self.repo_path):
cmd = "git clone %s -b %s %s" % (self.repo_url, self.branch, self.repo_path)
print "# %s" % cmd
rc, out, err = run_command(cmd, cwd=self.tmp_path, shell=False)
if rc != 0:
import epdb; epdb.st()
def exec_command(self, cmd):
this_path = os.path.join(self.tmp_path, self.repo_dir, "hacking") + "/env-setup"
cmd = "source %s 2>&1 > /dev/null && %s" % (this_path, cmd)
rc, out, err = run_command(cmd, shell=True, executable="/bin/bash", split=False)
return rc, out, err
def run_command(cmd, cwd=None, shell=True, executable=None, split=True):
if type(cmd) is not list and split:
cmd = shlex.split(cmd)
if not cwd:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
executable=executable,
shell=shell)
else:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
executable=executable,
shell=shell)
out, err = p.communicate()
return p.returncode, out, err
def get_versions():
aversions = {}
cmd = "git branch --all"
rc, out, err = run_command(cmd, cwd="/home/jtanner/ansible", shell=False)
if rc != 0:
print 'ERROR: unable to run git branch --all in /home/jtanner/ansible'
sys.exit(1)
for bversion in out.split('\n'):
bversion = bversion.strip()
# skip head
if '->' in bversion:
continue
# skip non remotes
if not bversion.startswith('remotes'):
continue
bnormal = bversion.split('/')[-1]
aversions[bnormal] = bversion
return aversions
def make_test_plan(aversions, mdict):
plan = {}
for mkey in sorted(mdict.keys()):
for pkey in mdict[mkey]['params'].keys():
m_name = mdict[mkey]['module']
p_version = mdict[mkey]['params'][pkey]['version_added']
p_version = str(p_version)
m_name = mdict[mkey]['module']
if p_version not in plan:
plan[p_version] = {}
if m_name not in plan[p_version]:
plan[p_version][m_name] = []
plan[p_version][m_name].append(pkey)
plan.pop("historical", None)
return plan
def locate_parameter(aversions, module, param):
found = []
for akey in aversions.keys():
"""
if akey == 'devel':
this_version = "1.6"
else:
this_version = akey.replace('release', '')
this_version = this_version.replace('-', '')
this_version = str(this_version)
"""
#import epdb; epdb.st()
this_checkout = Checkout("https://github.com/ansible/ansible", branch=akey)
this_checkout.makecheckout()
# verify module_docs works on this version
cmd = "python /tmp/docscript.py %s" % 'file'
rc, out, err = this_checkout.exec_command(cmd)
if rc != 0:
#print '# %s unable to import module_docs' % akey
continue
# get all docs for this module at this version
cmd = "python /tmp/docscript.py %s" % module
rc, out, err = this_checkout.exec_command(cmd)
if rc == 0:
data = ast.literal_eval(out)
else:
data = None
if data:
if param in data['options']:
found.append(akey)
return found
def run_test_plan(plan, aversions, mdict):
keymap = {}
results = []
for akey in aversions.keys():
if akey == 'devel':
this_version = "1.6"
else:
this_version = akey.replace('release', '')
this_version = this_version.replace('-', '')
this_version = str(this_version)
keymap[this_version] = akey
for plan_version in sorted(plan.keys()):
if not plan_version in keymap:
continue
this_branch = aversions[keymap[plan_version]]
this_branch = this_branch.split('/')[-1]
print "#",plan_version,":",this_branch
this_checkout = Checkout("https://github.com/ansible/ansible", branch=this_branch)
this_checkout.makecheckout()
# verify module_docs works on this version
cmd = "python /tmp/docscript.py %s" % 'file'
rc, out, err = this_checkout.exec_command(cmd)
if rc != 0:
print '# %s unable to import module_docs: %s' % (this_branch, out)
continue
#import epdb; epdb.st()
for mkey in sorted(plan[plan_version].keys()):
# get all docs for this module at this version
cmd = "python /tmp/docscript.py %s" % mdict[mkey]['module']
rc, out, err = this_checkout.exec_command(cmd)
if rc == 0:
data = ast.literal_eval(out)
else:
data = None
if data:
for pkey in plan[plan_version][mkey]:
if pkey in data['options']:
print "VERIFIED: %s in %s with %s" % (pkey, mkey, this_branch)
else:
found = locate_parameter(aversions, mkey, pkey)
found = ",".join(sorted(found))
print "BAD: %s in %s with %s: found in %s" % \
(pkey, mkey, this_branch, found)
this_result = "%s;%s;%s;%s" % (mkey, pkey, plan_version, found)
results.append(this_result)
for line in results:
open("/tmp/results.csv", "a").write("%s\n" % line)
#####################
# MAIN #
#####################
if __name__ == "__main__":
mdict = pickle.load(open("/tmp/module-params.pickle", "rb"))
# mdict
# module_name:
# version_added: 1.6
# module: the name
# params:
# one:
# key:
# version_added: 1.6
open("/tmp/docscript.py", "wb").write(docscript)
aversions = get_versions()
plan = make_test_plan(aversions, mdict)
run_test_plan(plan, aversions, mdict)
#import epdb; epdb.st()
| [
"tanner.jc@gmail.com"
] | tanner.jc@gmail.com |
25c1fe6db107fe48d30cd7776ecd86b05fa9d636 | a75ac3c5c641fc00a3c403b08eeb6008f648639e | /CodeForces/Python2/330A.py | 55e60b11422a3ffb790bff55aa50324aa95a0ea8 | [] | no_license | Greenwicher/Competitive-Programming | 5e9e667867c2d4e4ce68ad1bc34691ff22e2400a | 6f830799f3ec4603cab8e3f4fbefe523f9f2db98 | refs/heads/master | 2018-11-15T15:25:22.059036 | 2018-09-09T07:57:28 | 2018-09-09T07:57:28 | 28,706,177 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 23 16:15:41 2015
@author: liuweizhi
"""
## version 1 (wrong anser)
r,c=map(int,raw_input().split());cake=''
for i in range(r):
cake+=raw_input()
for i in range(r):
cake[i:i+c+1]=[cake[i:i+c+1],'o'*c][cake[i:i+c+1].find('S')==-1]
for i in range(c):
cake[i::c]=[cake[i::c],'o'*c][cake[i::c].find('S')==-1]
print cake.count('o')
## version 2
r,c=map(int,raw_input().split());cake=[]
for i in range(r):
for j in raw_input():
cake.append(ord(j))
for i in range(r):
cake[i*c:(i+1)*c]=[cake[i*c:(i+1)*c],[99]*c][83 not in cake[i*c:(i+1)*c]]
for i in range(c):
cake[i::c]=[cake[i::c],[99]*r][83 not in cake[i::c]]
print sum(1 for i in cake if i==99)
| [
"weizhiliu2009@gmail.com"
] | weizhiliu2009@gmail.com |
373580f79b535f93764e02be1ee047390c697525 | c8eff17479e46abd759dfa4f627303cefecbb6f8 | /tests/test_transaction.py | fc0dfbc401183fd3e3bb39471c16e1bc69748ab6 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | snower/torpeewee | 102832fe8ccc5fd7b18a1d634207be5869e71929 | 1d2d73090972ab33bb4a0980bfed63ff74961a1a | refs/heads/master | 2021-06-10T00:14:05.294889 | 2021-05-25T10:15:40 | 2021-05-25T10:15:40 | 61,872,177 | 31 | 10 | null | null | null | null | UTF-8 | Python | false | false | 2,956 | py | # -*- coding: utf-8 -*-
# 16/7/11
# create by: snower
import datetime
from tornado import gen
from tornado.testing import gen_test
from . import BaseTestCase
from .model import Test, db
class TestTestCaseTransaction(BaseTestCase):
async def run_transaction(self, transaction):
await Test.use(transaction).create(data="test_run_transaction", created_at=datetime.datetime.now(),
updated_at=datetime.datetime.now())
count = await Test.select().count()
assert count == 2, ""
count = await Test.use(transaction).select().count()
assert count == 3, ""
@gen_test
async def test(self):
await Test.delete()
await Test.create(data="test", created_at=datetime.datetime.now(), updated_at=datetime.datetime.now())
async with await db.transaction() as transaction:
await Test.use(transaction).create(data="test", created_at=datetime.datetime.now(),
updated_at=datetime.datetime.now())
count = await Test.select().count()
assert count == 1, ""
count = await Test.use(transaction).select().count()
assert count == 2, ""
t = await Test.use(transaction).select().order_by(Test.id.desc()).first()
td = t.data
t.data = "222"
await t.use(transaction).save()
t = await Test.use(transaction).select().order_by(Test.id.desc()).first()
assert t.data == '222'
t = await Test.select().order_by(Test.id.desc()).first()
assert t.data == td
await db.transaction()(self.run_transaction)()
transaction = await db.transaction()
try:
await self.run_transaction(transaction)
except:
await transaction.rollback()
else:
await transaction.commit()
async with await db.transaction() as transaction:
t = await Test.use(transaction).select().order_by(Test.id.desc()).first()
t.data = "aaa"
await t.use(transaction).save()
t = await Test.select().order_by(Test.id.desc()).first()
assert t.data == 'aaa'
async with await db.transaction() as transaction:
t = await Test.use(transaction).select().order_by(Test.id.desc()).first()
await t.use(transaction).delete_instance()
t = await Test.select().where(Test.id == t.id).first()
assert t is None
async with await db.transaction() as transaction:
await Test.use(transaction).update(data='12345')
t = await Test.select().order_by(Test.id.desc()).first()
assert t.data == '12345', ''
async with await db.transaction() as transaction:
await Test.use(transaction).delete()
c = await Test.select().count()
assert c == 0, ''
await Test.delete() | [
"sujian199@gmail.com"
] | sujian199@gmail.com |
23d788b0c075d0f561a991487206cf5a7acfed5c | a0a5dbdf9b850092deeee5f4918ab95232c46100 | /pesquisasatisfacao/crm/migrations/0012_auto_20190409_2110.py | 0990285ee556573e172d51a5baae72b6094e0878 | [] | no_license | CoutinhoElias/pesquisasatisfacao | 55c084a2b1e27cc9d190fb4198e09b78f3c95ad1 | 870eb616917ba9e2d3179609f8764534aa3748f4 | refs/heads/master | 2022-11-30T21:59:21.102520 | 2020-04-25T14:18:05 | 2020-04-25T14:18:05 | 158,157,292 | 1 | 2 | null | 2022-11-22T03:31:14 | 2018-11-19T03:34:11 | JavaScript | UTF-8 | Python | false | false | 519 | py | # Generated by Django 2.2 on 2019-04-10 00:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crm', '0011_atendimento_department'),
]
operations = [
migrations.AlterField(
model_name='atendimento',
name='department',
field=models.CharField(choices=[('0', 'Folha'), ('1', 'Contábil'), ('2', 'Fiscal'), ('3', 'Financeiro')], default='3', max_length=15, verbose_name='Departamento'),
),
]
| [
"coutinho.elias@gmail.com"
] | coutinho.elias@gmail.com |
32abe9d76e271c36d97481943ade34862a5989b9 | bb92245006848ceac733c14b118ef4b269daeee8 | /lab3/training/util.py | bb5cd23af014cdacb2bf3b4057fd6ff7fd5c58b1 | [] | no_license | vazzolla/fsdl-text-recognizer-project | 34a1be71469895b0daed48ed3bceafebd230645a | b32ffe24974a205dcf2a6ec0c440e318f0dbd940 | refs/heads/master | 2022-04-21T11:10:24.962783 | 2020-04-24T01:25:41 | 2020-04-24T01:25:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | """Function to train a model."""
from time import time
from tensorflow.keras.callbacks import EarlyStopping, Callback
from text_recognizer.datasets.dataset import Dataset
from text_recognizer.models.base import Model
EARLY_STOPPING = True
def train_model(model: Model, dataset: Dataset, epochs: int, batch_size: int, use_wandb: bool = False) -> Model:
"""Train model."""
callbacks = []
if EARLY_STOPPING:
early_stopping = EarlyStopping(monitor="val_loss", min_delta=0.01, patience=3, verbose=1, mode="auto")
callbacks.append(early_stopping)
model.network.summary()
t = time()
_history = model.fit(dataset=dataset, batch_size=batch_size, epochs=epochs, callbacks=callbacks)
print("Training took {:2f} s".format(time() - t))
return model
| [
"sergeykarayev@gmail.com"
] | sergeykarayev@gmail.com |
01e2cab011b97a2456551669f92df13f2a9127d8 | a1e10efa6a131e305351909a437bfa5d083d4513 | /aspl_product_alert_qty/models/product_qty_alert.py | 1f148b1fbe51eb77322158feda3c7018aa9cd7bc | [] | no_license | h3llopy/glodok_extra_addons_od12 | 5089412b36b0dafdb17235a627c8e33ed2acbb1f | 5c493962b93254fb2ca8cd674c4fe153ac86d680 | refs/heads/master | 2022-12-05T06:22:08.182302 | 2020-08-29T14:32:30 | 2020-08-29T14:32:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,160 | py | # -*- coding: utf-8 -*-
#################################################################################
# Author : Acespritech Solutions Pvt. Ltd. (<www.acespritech.com>)
# Copyright(c): 2012-Present Acespritech Solutions Pvt. Ltd.
# All Rights Reserved.
#
# This program is copyright property of the author mentioned above.
# You can`t redistribute it and/or modify it.
#
#################################################################################
from odoo import models,fields,api
from odoo.exceptions import ValidationError
class ProductQtyAlert(models.Model):
_name = "product.qty.alert"
product_id = fields.Many2one('product.product')
location_id = fields.Many2one('stock.location',domain=[('usage','=','internal')])
alert_qty = fields.Float(string="Alert Quantity")
class InheritmailTemplate(models.Model):
_inherit = "mail.template"
use_for_alert_qty = fields.Boolean(string="Use For Quantity Alert")
class InheritProduct(models.Model):
_inherit = "product.product"
alert_product_ids = fields.One2many('product.qty.alert','product_id', string="Alerts")
same_for_all = fields.Boolean(string="Apply All", default=True)
alert_qty = fields.Float(string="Alert Quantity")
@api.multi
def btn_print_report(self):
datas = {'form': self.read()[0],
'ids': self.id,
'model': 'product.product'}
return self.env.ref('aspl_product_alert_qty.action_report_alert_qty').report_action(self, data=datas)
class ProductTemplate(models.Model):
_inherit = 'product.template'
# alert_product_ids = fields.One2many('product.qty.alert','product_id', string="Alerts")
same_for_all = fields.Boolean(string="Apply All", default=True)
alert_qty = fields.Float(string="Alert Quantity")
show_in_alert = fields.Boolean(default=False, string="Show In Alert Stock Report")
@api.constrains('same_for_all','alert_qty')
def constrains_alert_products(self):
for rec in self:
rec.product_variant_ids.write({
'same_for_all':rec.same_for_all,
'alert_qty':rec.alert_qty
}) | [
"kikin.kusumah@gmail.com"
] | kikin.kusumah@gmail.com |
f8e18d09a9f3e6a5653a428f33257917bcd09c34 | 2cd24ddd86e97d01c20a96bfc8ed8b541a80d608 | /apps/compra/migrations/0003_detalle_compra_subtotal.py | 0b37c02829a3de243b74a5129374ec3bcf73d820 | [] | no_license | chrisstianandres/don_chuta | 1c048db633246effb06800f28a3a4d8af2cac199 | e20abeb892e6de572a470cd71c5830c6f9d1dafa | refs/heads/master | 2023-01-07T09:14:52.837462 | 2020-11-18T20:55:10 | 2020-11-18T20:55:10 | 293,209,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | # Generated by Django 2.2.14 on 2020-09-09 21:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('compra', '0002_auto_20200905_1332'),
]
operations = [
migrations.AddField(
model_name='detalle_compra',
name='subtotal',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=9),
),
]
| [
"Chrisstianandres@gmail.com"
] | Chrisstianandres@gmail.com |
dd9925b429dd30c9f5af8adf9362aadd5a58cd44 | 3bdcb60b0bffeeb6ff7b0ddca4792b682158bb12 | /Funciones/276.py | cad0e1b914f6b0d518c47a4347780a3f47f69055 | [] | no_license | FrankCasanova/Python | 03c811801ec8ecd5ace66914f984a94f12befe06 | 03f15100991724a49437df3ce704837812173fc5 | refs/heads/master | 2023-05-23T01:37:12.632204 | 2021-06-10T15:20:38 | 2021-06-10T15:20:38 | 278,167,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # desings a function that, given a list of strings, return the longest string.
# if two or more have the same length and are the lonfest, the function will
# return any one of them
def largest_str(string):
largest = None
for index, snts in enumerate(string):
if largest == None:
largest = snts
if len(snts) > len(largest):
largest = string[index]
return largest
list_strings = ['hola que pasa', 'hoy no quiero salir de mi casa la verdad',
'no sé por qué no me he quedado en casa']
print(largest_str(list_strings))
| [
"frankcasanova.info@gmail.com"
] | frankcasanova.info@gmail.com |
762b27f7e07d1145bfd1743c41531647c2a87ae0 | dfb00e98cc3bfe40df3a3f2196b1003d3122ca84 | /{{cookiecutter.project_slug}}/tests.py | 58a3817282e840fc961a8edb8268bcb73b622e5a | [] | no_license | andremcb/bakery_scaffold_tests_8sGLG7TckgT6EJlB | b80ed9ff7b34fe2433c9af1a9f07948674a2c781 | 08c8806dcbdca288d0f607350da8b5801489833c | refs/heads/master | 2020-07-09T03:42:09.226572 | 2019-08-22T20:13:03 | 2019-08-22T20:13:03 | 203,866,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | import unittest
import re
class TestStripe(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestStripe, self).__init__(*args, **kwargs)
with open('order.html', 'r') as file_descriptor:
self.dom_str = file_descriptor.read()
# Check if redirectToCheckout function call is present
def test_redirect_to_checkout(self):
self.assertNotEqual(self.dom_str, '.redirectToCheckout',
'No stripe redirect call found!')
# Check if successUrl redirects to order_success.html
def test_successUrl(self):
self.assertRegex(self.dom_str,
r'successUrl: \'https:\/\/[a-z]*\.com/order_success\.html\'',
'No order_success.html redirect found on checkout success.')
# Check if cancelUrl redirects to order.html
def test_cancelUrl(self):
self.assertRegex(self.dom_str,
r'cancelUrl: \'https:\/\/[a-z]*\.com/order\.html\'',
'No order.html redirect found on checkout cancel.')
{{ cookiecutter.extra_data }}
if __name__ == '__main__':
unittest.main()
| [
"csantos.machado@gmail.com"
] | csantos.machado@gmail.com |
ae6671c6877541a49134ba53fef3117cf356e2e1 | 68ee9027d4f780e1e5248a661ccf08427ff8d106 | /extra/unused/baselinePlotter.py | 92136463546bb02edb9475a21ac2338f2a5cb2b9 | [
"MIT"
] | permissive | whyjz/CARST | 87fb9a6a62d39fd742bb140bddcb95a2c15a144c | 4fc48374f159e197fa5a9dbf8a867b0a8e0aad3b | refs/heads/master | 2023-05-26T20:27:38.105623 | 2023-04-16T06:34:44 | 2023-04-16T06:34:44 | 58,771,687 | 17 | 4 | MIT | 2021-03-10T01:26:04 | 2016-05-13T20:54:42 | Python | UTF-8 | Python | false | false | 3,733 | py | #!/usr/bin/python
# baselinePlotter.py
# Author: Andrew Kenneth Melkonian
# All rights reserved
# Plots perpendicular baselines for all *baseline.rsc files in the input directory that contain a valid number identified as "P_BASELINE*"
# USAGE
# *****
# python /path/containing/ints /path/to/put/output.ps
# /path/containing/ints: Path to directory that contains all of the *baseline.rsc files to search and plot from
# /path/to/put/output.ps: Path to output image (baselines plot)
def baselinePlotter(input_dir, output_path):
import fnmatch;
import os;
import subprocess;
import sys;
import re;
assert os.path.exists(input_dir), "\n***** ERROR: " + input_dir + " does not exist\n";
output_dir = ".";
index = output_path.rfind("/");
if index > -1:
output_dir = output_path[ : index];
assert os.path.exists(output_dir), "\n***** ERROR: " + output_dir + " does not exist\n";
# contents = os.listdir(input_dir);
# date_dirs = [item for item in contents if os.path.isdir(item) and re.search("^\d{6}$", item)];
# outfile = open("temp_params.txt", "w");
# outfile.write("WorkPath = " + input_dir + "\n");
# outfile.write("DEM = " + input_dir + "\n");
# outfile.write("MaxBaseline = 10000\n");
# outfile.write("MinDateInterval = 1\n");
# outfile.write("MaxDateInterval = 100000\n");
# outfile.write("DataType = ERS\n");
# outfile.write("Angle = 23\n");
# outfile.write("rwin = 40\n");
# outfile.write("awin = 80\n");
# outfile.write("search_x = 8\n");
# outfile.write("search_y = 8\n");
# outfile.write("wsamp = 1\n");
# outfile.write("numproc = 1\n");
# outfile.close();
# print("\npython /data/akm/Python/pixelTack_new.py params.txt setup offsets\n");
baseline_dates = {};
baseline_values = {};
for root, dirnames, filenames in os.walk(input_dir):
for filename in fnmatch.filter(filenames, "*baseline.rsc"):
baseline_dates[root + "/" + filename] = filename[re.search("\d{6}_\d{6}", filename).start(0) : re.search("\d{6}_\d{6}", filename).end(0)];
p_b_t = "";
p_b_b = "";
for baseline_path in baseline_dates:
infile = open(baseline_path, "r");
for line in infile:
if line.find("P_BASELINE_TOP") > -1:
p_b_t = line.split()[1];
if line.find("P_BASELINE_BOTTOM") > -1:
p_b_b = line.split()[1];
infile.close();
p_b = abs(float(p_b_t) + float(p_b_b)) / 2;
baseline_values[p_b] = baseline_path;
sorted_p_b = sorted(baseline_values);
min_p_b = sorted_p_b[0];
max_p_b = sorted_p_b[len(sorted_p_b) - 1];
min_p_b = round(min_p_b, -2) - 50;
max_p_b = round(max_p_b, -2) + 50;
R = "-R0/" + str(len(baseline_values.values()) + 2) + "/" + str(min_p_b) + "/" + str(max_p_b);
ps_path = output_path;
cmd = "";
cmd +="\npsbasemap -Ba1f1:\"SAR Pair\":/a100f100:\"Average Baseline (m)\":WeSn -JX10c " + R + " -P -K > " + ps_path + "\n";
i = 1;
for p_b in sorted_p_b:
cmd += "\necho \"" + str(i) + " " + str(p_b) + "\" | psxy -JX10c " + R + " -Ss0.2c -Gred -W0.5p,darkgray -O -K >> " + ps_path + "\n";
cmd += "\necho \"" + str(float(i) + 0.1) + " " + str(p_b) + " 8p,1,black 0 LM " + baseline_dates[baseline_values[p_b]] + "\" | pstext -JX10c " + R + " -F+f+a+j -Gwhite -W1p,darkgray -O -K >> " + ps_path + "\n";
i += 1;
cmd = cmd[ : cmd.rfind("-K") - 1] + cmd[cmd.rfind("-K") + 2 : ];
cmd += "\nps2raster -A -Tf -D" + output_dir + " " + ps_path + "\n";
subprocess.call(cmd,shell=True);
return;
if __name__ == "__main__":
import os;
import sys;
assert len(sys.argv) > 2, "\n***** ERROR: baselinePlotter.py requires at least 2 arguments, " + str(len(sys.argv) - 1) + " given\n";
assert os.path.exists(sys.argv[1]), "\n***** ERROR: " + sys.argv[1] + " does not exist\n";
baselinePlotter(sys.argv[1], sys.argv[2]);
exit();
| [
"wz278@cornell.edu"
] | wz278@cornell.edu |
4ad295cd0d344c11d2f70ff4e00f9e00e52284a9 | 996967405d3ee07e011ee0f0404d03b6d04d3492 | /dataloader/get_coco/data_path.py | 7b3d7f36f674f57bbc35b16c04147efeb152513f | [] | no_license | wyyy04/MyRepository | 797936fc757a2eee4793d5b1b47ebf8b57216ab8 | 91f1a7ff969e91d9649b96796c5827c9910a8183 | refs/heads/main | 2023-02-22T09:56:21.926013 | 2021-01-27T15:34:00 | 2021-01-27T15:34:00 | 315,524,193 | 0 | 0 | null | 2020-11-24T07:30:05 | 2020-11-24T05:05:28 | null | UTF-8 | Python | false | false | 314 | py | #数据集实际路径
DataDir = 'loader\data\\'
Datasetfile = DataDir + 'dataset.txt'
Embeddingfile = DataDir + 'skipthoughts.npz'
Clusterfile = DataDir + 'clusters.npz'
Clustersnamefile = DataDir + 'clustersname.txt'
Clusterfile_256 = DataDir + 'clusters_256.npz'
ImageDir = DataDir + 'COCO_motivations_clean'
| [
"you@example.com"
] | you@example.com |
60675121234509c5ca112bd1aae23ca8213106e1 | 1e50f1643376039ca988d909e79f528e01fa1371 | /leetcode/editor/cn/464.我能赢吗.py | 94193d96b4637cc196d0af39d9c5bfbb1de75e69 | [] | no_license | mahatmaWM/leetcode | 482a249e56e2121f4896e34c58d9fa44d6d0034b | 4f41dad6a38d3cac1c32bc1f157e20aa14eab9be | refs/heads/master | 2022-09-04T17:53:54.832210 | 2022-08-06T07:29:46 | 2022-08-06T07:29:46 | 224,415,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,464 | py | #
# @lc app=leetcode.cn id=464 lang=python3
#
# [464] 我能赢吗
#
# https://leetcode-cn.com/problems/can-i-win/description/
#
# algorithms
# Medium (33.86%)
# Likes: 131
# Dislikes: 0
# Total Accepted: 4.7K
# Total Submissions: 13.8K
# Testcase Example: '10\n11'
#
# 在 "100 game" 这个游戏中,两名玩家轮流选择从 1 到 10 的任意整数,累计整数和,先使得累计整数和达到 100 的玩家,即为胜者。
#
# 如果我们将游戏规则改为 “玩家不能重复使用整数” 呢?
#
# 例如,两个玩家可以轮流从公共整数池中抽取从 1 到 15 的整数(不放回),直到累计整数和 >= 100。
#
# 给定一个整数 maxChoosableInteger (整数池中可选择的最大数)和另一个整数
# desiredTotal(累计和),判断先出手的玩家是否能稳赢(假设两位玩家游戏时都表现最佳)?
#
# 你可以假设 maxChoosableInteger 不会大于 20, desiredTotal 不会大于 300。
#
# 示例:
#
# 输入:
# maxChoosableInteger = 10
# desiredTotal = 11
#
# 输出:
# false
#
# 解释:
# 无论第一个玩家选择哪个整数,他都会失败。
# 第一个玩家可以选择从 1 到 10 的整数。
# 如果第一个玩家选择 1,那么第二个玩家只能选择从 2 到 10 的整数。
# 第二个玩家可以通过选择整数 10(那么累积和为 11 >= desiredTotal),从而取得胜利.
# 同样地,第一个玩家选择任意其他整数,第二个玩家都会赢。
#
#
#
# @lc code=start
class Solution:
def canIWin(self, maxChoosableInteger: int, desiredTotal: int) -> bool:
def win(M, T, m, state):
if T <= 0: return False
if m[state] != 0: return m[state] == 1
i = 0
while i < M:
if state & (1 << i) > 0:
i += 1
continue
if not win(M, T - (i + 1), m, state | (1 << i)):
m[state] = 1
return True
i += 1
m[state] = -1
return False
# 特殊情况的处理
s = maxChoosableInteger * (maxChoosableInteger + 1) // 2
if s < desiredTotal: return False
if desiredTotal <= 0: return True
if s == desiredTotal: return maxChoosableInteger % 2 == 1
m = [0] * (1 << maxChoosableInteger)
return win(maxChoosableInteger, desiredTotal, m, 0)
# @lc code=end
| [
"chrismwang@tencent.com"
] | chrismwang@tencent.com |
db74737a5f19f4aabfff78ef6d8818a308c1b77b | f7a748eb6803a9f2b609dad279e30513497fa0be | /test/com/facebook/buck/testutil/endtoend/testdata/cxx_dependent_on_py/py_bin/generate_cpp.py | dd8e0082ceef3fbb3366dbddd661f943f8b28997 | [
"Apache-2.0"
] | permissive | MMeunierSide/buck | a44937e207a92a8a8d5df06c1e65308aa2d42328 | b1aa036a203acb8c4cf2898e0af2a1b88208d232 | refs/heads/master | 2020-03-09T23:25:38.016401 | 2018-04-11T04:11:59 | 2018-04-11T05:04:57 | 129,057,807 | 1 | 0 | Apache-2.0 | 2018-04-11T08:06:36 | 2018-04-11T08:06:35 | null | UTF-8 | Python | false | false | 305 | py | from py_lib.util import Util
def generate_cpp():
print('#include <generate_cpp/generated.h>\n')
print('Generated::Generated() {}\n')
print('std::string Generated::generated_fcn() {')
print('return "{}";'.format(Util().name))
print('}')
if __name__ == "__main__":
generate_cpp()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
6aa4e44998c98918ebf8e738493f4114fffe694a | 839c21c464a0161d221f73aecfee8e1a9cb0e281 | /tests/clock_test.py | f4df5ca1d934a3e1ef41beb090c5ea0a93e7b690 | [
"Apache-2.0"
] | permissive | al-fontes-jr/bardolph | 89c98da6645e98583251c95e9fa24816c21ad40b | 27504031d40d288be85bc51b82b6829e3f139d93 | refs/heads/master | 2022-02-02T03:20:52.996806 | 2022-01-12T11:12:04 | 2022-01-12T11:12:04 | 200,828,449 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | #!/usr/bin/env python
import unittest
from unittest.mock import patch
from bardolph.lib import clock, injection, settings, time_pattern
class MockNow:
def __init__(self, hour, minute):
self._hour = hour
self._minute = minute
@property
def hour(self):
self._minute += 1
if self._minute == 60:
self._minute = 0
self._hour += 1
if self._hour == 24:
self._hour = 0
return self._hour
@property
def minute(self):
return self._minute
def time_equals(self, hour, minute):
return hour == self._hour and minute == self._minute
class ClockTest(unittest.TestCase):
def setUp(self):
injection.configure()
self._precision = 0.1
settings.using({'sleep_time': self._precision}).configure()
def test_clock(self):
clk = clock.Clock()
clk.start()
time_0 = clk.et()
for _ in range(1, 10):
clk.wait()
time_1 = clk.et()
delta = time_1 - time_0
self.assertAlmostEqual(delta, self._precision, 1)
time_0 = time_1
clk.stop()
@patch('bardolph.lib.clock.datetime')
def test_time_pattern(self, patch_datetime):
mock_now = MockNow(9, 55)
patch_datetime.now = lambda: mock_now
clk = clock.Clock()
clk.start()
clk.wait_until(time_pattern.TimePattern.from_string('10:*'))
self.assertTrue(mock_now.time_equals(10, 0))
clk.wait_until(time_pattern.TimePattern.from_string('10:1*'))
self.assertTrue(mock_now.time_equals(10, 10))
clk.wait_until(time_pattern.TimePattern.from_string('10:*5'))
self.assertTrue(mock_now.time_equals(10, 15))
clk.stop()
if __name__ == '__main__':
unittest.main()
| [
"alfred@fontes.org"
] | alfred@fontes.org |
4f6e4f5031e1ca5479b504ef39a11043e741aa2d | 50c23021b19aef84c9c0ed8f8116b1b395df3205 | /linkipYQ/demo.py | 5df3cbeb710a5b05ab4275ba368a21319344e461 | [] | no_license | beforeuwait/code_daqsoft | d87891c6a409841dd495ab85aadb48cb348f9891 | 6178fdbc08a54b2827c1a80297684a628d4f9c08 | refs/heads/master | 2021-09-28T20:33:14.164879 | 2018-11-20T09:16:52 | 2018-11-20T09:16:52 | 108,245,470 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,191 | py | """
这是一个测试,作为测试linkip的服务器是否返回jessionid
"""
import requests
import time
import json
session = requests.session()
url_home = 'http://yq.linkip.cn/user/login.do'
url_log = 'http://yq.linkip.cn/user/index.do'
data_log = {
'name': 'gujing8835',
'password': 'gugugu110',
'type':1,
}
headers_login = {
'Host': 'yq.linkip.cn',
'Origin': 'http://yq.linkip.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
}
home_page = session.get(url_home, headers=headers_login)
jessionid = home_page.cookies.get('JSESSIONID')
cookie_text = ' userName=gujing8835; userPass=gugugu110; JSESSIONID=%s' %jessionid
cookies = {
"Cookies": cookie_text
}
login = session.post(url_log, headers=headers_login, cookies=cookies, data=data_log)
print(login.status_code)
print(login.text)
# headers = {
# 'Host': 'yq.linkip.cn',
# 'Origin': 'http://yq.linkip.cn',
# 'Referer': 'http://yq.linkip.cn/user/qwyq.do',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
# 'X-Requested-With': 'XMLHttpRequest',
# }
#
# data = {
# 'rangeId': 1,
# 'currPage': 1,
# 'themeId': 0,
# 'topicId': 0,
# 'sentiment': 1,
# 'type': 0,
# 'startDay': '2017-11-01 00:00',
# 'endDay': '2017-11-16 23:59',
# 'page': 500,
# 'allKeywords': '',
# 'orKeywords': '',
# 'noKeywords': '',
# 'tuKeywords': '',
# 'keyWordLocation': 5
# }
# theme = [
# # ('南充', '45234'), ('成都', '45344'),
# ('西安', '45345'), ('云南', '45346'), ('新疆', '45347')]
# tt = ['id', 'title', 'content', 'createtime', 'url', 'type', 'xss', 'source', 'score', 'sentiment']
# # 开始循环
# url = 'http://yq.linkip.cn/user/getdata.do'
#
# s1 = time.time()
#
# for city, id in theme:
# n ,page = 1, 1
# while n <= page:
# print(city, str(n))
# data['themeId'] = id
# data['currPage'] = n
# start = time.time()
# js = session.post(url, headers=headers, cookies=cookies,data=data, timeout=60)
# end = time.time()
# jessionid = js.headers.get('Set-Cookie', 'no')
# if jessionid != 'no':
# id = js.cookies.get('JSESSIONID', 'no')
# if id != 'no':
# cookie_text = ' userName=beforeuwait; userPass=forwjw2017; JSESSIONID=%s' % id
# cookies = {
# "Cookies": cookie_text
# }
# js_dict = json.loads(js.content.decode('utf8'))
# page = int(js_dict.get('pageNum', 1))
# result = js_dict.get('result', [])
# text = ''
# for each in result:
# text += '\u0001'.join([str(each.get(i, ''))for i in tt]).replace('\n', '').replace('\r', '').replace(' ', '') + '\n'
#
# with open('%s_data.txt' % city, 'a', encoding='utf8') as f:
# f.write(text)
# long = int(end - start)
# try:
# time.sleep(20 - long)
# except:
# continue
# n += 1
# s2 = time.time()
#
# print(s2 - s1) | [
"forme.wjw@aliyun.com"
] | forme.wjw@aliyun.com |
ac34a2fc2cecd9267afbbe09ea616ae446adea6e | cad762658ab8326d7f43bba6f69df35a8b770e34 | /pymarkdown/plugins/rule_md_005.py | fe3d1dad36078ec91062a727f67dc7ed40593565 | [
"MIT"
] | permissive | ExternalRepositories/pymarkdown | 9c248b519791a4c869d1e71fa405c06d15ce553b | 479ace2d2d9dd5def81c72ef3b58bce6fb76f594 | refs/heads/main | 2023-08-28T03:45:25.536530 | 2021-10-31T19:39:22 | 2021-10-31T19:39:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,109 | py | """
Module to implement a plugin that ensures that the indentation for List Items
are equivalent with each other.
"""
from enum import Enum
from pymarkdown.plugin_manager import Plugin, PluginDetails
class OrderedListAlignment(Enum):
"""
Enumeration to provide guidance on what alignment was used for ordered lists.
"""
UNKNOWN = 0
LEFT = 1
RIGHT = 2
class RuleMd005(Plugin):
"""
Class to implement a plugin that ensures that the indentation for List Items
are equivalent with each other.
"""
def __init__(self):
super().__init__()
self.__list_stack = None
self.__unordered_list_indents = {}
self.__ordered_list_starts = {}
self.__ordered_tokens = {}
self.__ordered_list_alignment = {}
def get_details(self):
"""
Get the details for the plugin.
"""
return PluginDetails(
plugin_name="list-indent",
plugin_id="MD005",
plugin_enabled_by_default=True,
plugin_description="Inconsistent indentation for list items at the same level",
plugin_version="0.5.0",
plugin_interface_version=1,
plugin_url="https://github.com/jackdewinter/pymarkdown/blob/main/docs/rules/rule_md005.md",
)
def starting_new_file(self):
"""
Event that the a new file to be scanned is starting.
"""
self.__list_stack = []
self.__unordered_list_indents = {}
self.__ordered_list_starts = {}
self.__ordered_tokens = {}
self.__ordered_list_alignment = {}
def __report_issue(self, context, token):
current_list_indent = len(self.__list_stack[-1].extracted_whitespace)
if self.__list_stack[0].is_unordered_list_start:
indent_adjust = self.__list_stack[0].column_number - 1
else:
indent_adjust = -1
token_indent = len(token.extracted_whitespace) - indent_adjust
expected_indent = (
len(self.__list_stack[-2].extracted_whitespace)
if token_indent <= current_list_indent
and len(self.__list_stack) > 1
and self.__list_stack[-2].is_list_start
else current_list_indent
)
extra_data = f"Expected: {expected_indent}; Actual: {token_indent}"
self.report_next_token_error(context, token, extra_data)
def __handle_ordered_list_item(self, context, token):
list_level = len(self.__list_stack)
list_alignment = self.__ordered_list_alignment[list_level]
if list_alignment == OrderedListAlignment.RIGHT:
assert self.__ordered_list_starts[list_level].extracted_whitespace
original_text = (
self.__ordered_list_starts[list_level].list_start_content
+ self.__ordered_list_starts[list_level].extracted_whitespace
)
original_text_length = len(original_text)
current_prefix_length = len(
f"{token.list_start_content}{token.extracted_whitespace}"
)
if original_text_length == current_prefix_length:
assert (
token.indent_level
== self.__ordered_list_starts[list_level].indent_level
)
else:
self.__report_issue(context, token)
elif (
self.__ordered_list_starts[list_level].column_number != token.column_number
):
self.__report_issue(context, token)
def __compute_ordered_list_alignment(self):
list_level = len(self.__list_stack)
last_length = 0
last_token = None
for next_token in self.__ordered_tokens[list_level]:
content_length = len(next_token.list_start_content)
if not last_length:
last_length = content_length
last_token = next_token
elif content_length != last_length:
if last_token.column_number == next_token.column_number:
self.__ordered_list_alignment[
list_level
] = OrderedListAlignment.LEFT
break
last_total_length = len(last_token.extracted_whitespace) + len(
last_token.list_start_content
)
next_total_length = len(next_token.extracted_whitespace) + len(
next_token.list_start_content
)
if last_total_length == next_total_length:
self.__ordered_list_alignment[
list_level
] = OrderedListAlignment.RIGHT
break
def __handle_unordered_list_start(self, context, token):
self.__list_stack.append(token)
list_level = len(self.__list_stack)
if list_level not in self.__unordered_list_indents:
self.__unordered_list_indents[list_level] = token.indent_level
if self.__unordered_list_indents[list_level] != token.indent_level:
self.__report_issue(context, token)
def __handle_ordered_list_start(self, token):
self.__list_stack.append(token)
list_level = len(self.__list_stack)
self.__ordered_tokens[list_level] = []
self.__ordered_tokens[list_level].append(token)
if list_level not in self.__ordered_list_starts:
self.__ordered_list_starts[list_level] = token
self.__ordered_list_alignment[list_level] = OrderedListAlignment.UNKNOWN
def __handle_list_item(self, context, token):
if self.__list_stack[-1].is_unordered_list_start:
if (
self.__unordered_list_indents[len(self.__list_stack)]
!= token.indent_level
):
self.__report_issue(context, token)
else:
self.__ordered_tokens[len(self.__list_stack)].append(token)
def __handle_list_end(self, context, token):
if token.is_ordered_list_end:
list_level = len(self.__list_stack)
if (
self.__ordered_list_alignment[list_level]
== OrderedListAlignment.UNKNOWN
):
self.__compute_ordered_list_alignment()
for next_token in self.__ordered_tokens[list_level]:
self.__handle_ordered_list_item(context, next_token)
del self.__list_stack[-1]
if not self.__list_stack:
self.__unordered_list_indents = {}
self.__ordered_list_starts = {}
def next_token(self, context, token):
"""
Event that a new token is being processed.
"""
if token.is_unordered_list_start:
self.__handle_unordered_list_start(context, token)
elif token.is_ordered_list_start:
self.__handle_ordered_list_start(token)
elif token.is_unordered_list_end or token.is_ordered_list_end:
self.__handle_list_end(context, token)
elif token.is_new_list_item:
self.__handle_list_item(context, token)
| [
"jack.de.winter@outlook.com"
] | jack.de.winter@outlook.com |
63af7334751636287b81565df83c25a6a899d950 | cc0e381fde5cc6870770396d990d2bad66a3186c | /Aula/aula09t.py | 72aa8fa7baa34cbfc5ff53c13f1e74ca9d562308 | [] | no_license | jnthmota/Python-PySpark-Cursos | 2c7fac79867059e0dfe4f0c4b6b6e1d32260530f | 680a4c422e14a26036379f49f0de6b5e73d7e431 | refs/heads/main | 2023-08-15T00:22:59.189649 | 2021-09-12T23:00:39 | 2021-09-12T23:00:39 | 373,610,471 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,372 | py | ''' Nessa aula, vamos aprender operações com String no Python. As principais operações que vamos aprender
são o Fatiamento de String, Análise com len(), count(), find(), transformações com replace(),
upper(), lower(), capitalize(), title(), strip(), junção com join(). '''
frase = 'Curso em Video Python'
frase = [9] #Vai pega a nona letra da lista
frase = [9:13] = #Vai pega o intervalo de 9:13 que seria 'Vide' fatiamento sempre pega um a menos
frase = [9:21] = # Vai pega o intervalo de 9:21 'Video Python'
frase = [9:21:2] = # Vai pega o intervalo de 9:21 porem saltando de 2 em 2 = 'Vdo Pto'
frase = [:5] = # Vai pega os 5 primeiros do fatiamento = 'Curso'
frase = [15:] = # Vai pega de os ultimos carateres da fatima do 15 ate o ultimo = 'Python'
frase = [9::3] = #Vai começa no 9:: e vai ate o final 'Video Python', :3 vai pular em 3 , = 'Ve Ph'
#Analise
len(frase) # Qual o comprimento da frase? len de frase seria 21 Caracteres
frase.count('o') # Contar quantas vezes aparece a letra 'o'(minuscula) case sensitive da frase = 3
frase.count('o',0,13) # Contagem com fatiamento vai considerar do 0 até o 13 = apenas 1 'o'
frase.find('deo') # Quantas vezes ele encontrou a frase 'deo' = [11]
frase.find('Android') # Vai te retornar um -1 então significa que essa palavra não foi encontrada na lista
'Curso' in frase # Existe curso em frase ? se houver será {True}
#Transformação
frase.replace('Python','Android') # Vai substituir Python por Android
frase.upper() # METODO UPPER, VAI FICAR TUDO MAIUSCULO
frase.lower() # METODO LOWER, vai fica tudo minusculo
frase.capitalize() #METODO CAPITALIZE, vai joga toda a frase minuscula, porém a primeira letra Maiuscula
frase.title() # METODO TITLE, vai fazer uma analise mais profunda, verificar onde tem uma quebra de espaço e o
#primeiro caracter e transformar em maiusculo
frase.strip() # METODO STRIP, remove os espaços inuteis no começo e no final
frase.rstrip() # METEDO STRIP, vai tratar o lado direto da string, ou seja so o final
frase.lstrip()# METEDO STRIP, vai tratar o lado esquerda da string, ou seja so o começo
# Divisão
frase.split() # METODO SPLIT, vai ocorrer uma divisão entre os espaços da frase, uma nova indexão
# [Curso] [em] [Video] [Python] 0-3 lista
#Junção
'-'.join(frase) # Juntar todos os elementos e vai usar '-' como separador = Curso-em-Video-Python | [
"jonathan.mota@outlook.com"
] | jonathan.mota@outlook.com |
06d2c50aca5d7686ebe3b4d52fcec48ca04d8574 | ab8117bc5b5040e5107fc59337fabc966cb062ba | /.history/twitter/engine_20200328113924.py | 61ac75e432040b96d6f549e92d793cc646966e14 | [] | no_license | mirfarzam/DownloaderBro | 6019ab561c67a397135d0a1585d01d4c6f467df4 | 8e0a87dd1f768cfd22d24a7f8c223ce968e9ecb6 | refs/heads/master | 2022-04-16T15:31:38.551870 | 2020-04-15T17:36:26 | 2020-04-15T17:36:26 | 255,090,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,023 | py |
import tweepy
import datetime
import configparser
import time
config = configparser.ConfigParser()
config.read('credential.conf')
consumer_key = config['API']["API_key"]
consumer_secret = config['API']["API_secret_key"]
access_token = config['ACCESS']["Access_token"]
access_token_secret = config['ACCESS']["Access_token_secert"]
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# api.verify_credentials()
def check_mentions(api, keywords, since_id):
new_since_id = since_id
for tweet in tweepy.Cursor(api.mentions_timeline,
since_id=since_id).items():
new_since_id = max(tweet.id, new_since_id)
if tweet.in_reply_to_status_id is None:
continue
main = (api.statuses_lookup([tweet.in_reply_to_status_id], include_entities=True ))[0]
final_video = None
try :
if 'media' in main.extended_entities:
maxBit = 0
maxURL = None
for video in main.extended_entities['media'][0]['video_info']['variants']:
try:
# print(f"{video['bitrate']} and is {video['url']}")
if video['bitrate'] > maxBit:
maxBit = video['bitrate']
maxURL = video['url']
except:
# print(f"Error in finding video in tweet id : {main.id}")
continue
# final_video = max(MyCount, key=int)
if maxURL is not None:
api.update_status(f'{tweet.user.screen_name} Hi Bro! this is the Link {maxURL}', in_reply_to_status_id = tweet.id)
except:
# print(f"Cannot get Tweet video and tweet id is : {main.id}")
continue
# print(final_video)
return new_since_id
since_id = 1
while True:
since_id = check_mentions(api, ["help", "support"], since_id)
time.sleep(10) | [
"farzam.mirmoeini@gmail.com"
] | farzam.mirmoeini@gmail.com |
22ba8cb74c5168ff4addd6a49abdd18dfae337b2 | 03cf49d6e2b002e5dc389282feb28769c55ff493 | /feas/gen_w2v_feat.py | 3915aa239d7723106ab8dbe88cdd6641e5588d71 | [] | no_license | yanqiangmiffy/Cityproperty-Rent-Forecast | 8f76ecf237e6bb2b7a81e844136dd12b324ee0bd | 489808ff0748d47fc34ff7c8f6168fe3fa8e39f2 | refs/heads/master | 2021-06-30T03:03:02.494347 | 2019-06-10T09:26:00 | 2019-06-10T09:26:00 | 182,541,053 | 7 | 0 | null | 2020-11-17T15:34:45 | 2019-04-21T14:04:40 | Jupyter Notebook | UTF-8 | Python | false | false | 2,711 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: quincyqiang
@software: PyCharm
@file: gen_w2v_feat.py
@time: 2019-05-17 09:56
@description: 生成Word2Vec特征
"""
import pandas as pd
import warnings
from gensim.models import Word2Vec
import multiprocessing
warnings.filterwarnings('ignore')
def w2v_feat(data_frame, feat, mode):
for i in feat:
if data_frame[i].dtype != 'object':
data_frame[i] = data_frame[i].astype(str)
data_frame.fillna('nan', inplace=True)
print(f'Start {mode} word2vec ...')
model = Word2Vec(data_frame[feat].values.tolist(), size=L, window=2, min_count=1,
workers=multiprocessing.cpu_count(), iter=10)
stat_list = ['min', 'max', 'mean', 'std']
new_all = pd.DataFrame()
for m, t in enumerate(feat):
print(f'Start gen feat of {t} ...')
tmp = []
for i in data_frame[t].unique():
tmp_v = [i]
tmp_v.extend(model[i])
tmp.append(tmp_v)
tmp_df = pd.DataFrame(tmp)
w2c_list = [f'w2c_{t}_{n}' for n in range(L)]
tmp_df.columns = [t] + w2c_list
tmp_df = data_frame[['ID', t]].merge(tmp_df, on=t)
tmp_df = tmp_df.drop_duplicates().groupby('ID').agg(stat_list).reset_index()
tmp_df.columns = ['ID'] + [f'{p}_{q}' for p in w2c_list for q in stat_list]
if m == 0:
new_all = pd.concat([new_all, tmp_df], axis=1)
else:
new_all = pd.merge(new_all, tmp_df, how='left', on='ID')
return new_all
if __name__ == '__main__':
L = 10
df_train = pd.read_csv('../input/train_data.csv')
df_test = pd.read_csv('../input/test_a.csv')
# ------------------ 过滤数据 begin ----------------
print("根据tradeMoney过滤数据:", len(df_train))
df_train = df_train.query("500<=tradeMoney<25000") # 线下 lgb_0.876612870005764
print("filter tradeMoney after:", len(df_train))
categorical_feas = ['rentType', 'houseFloor', 'houseToward', 'houseDecoration']
new_all_train = w2v_feat(df_train, categorical_feas, 'train')
new_all_test = w2v_feat(df_test, categorical_feas, 'test')
train = pd.merge(df_train, new_all_train, on='ID', how='left')
valid = pd.merge(df_test, new_all_test, on='ID', how='left')
print(f'Gen train shape: {train.shape}, test shape: {valid.shape}')
drop_train = train.T.drop_duplicates().T
drop_valid = valid.T.drop_duplicates().T
features = [i for i in drop_train.columns if i in drop_valid.columns]
print('features num: ', len(features) - 1)
train[features + ['tradeMoney']].to_csv('../input/train_w2v.csv', index=False)
valid[features].to_csv('../input/test_w2v.csv', index=False)
| [
"1185918903@qq.com"
] | 1185918903@qq.com |
ca42f38050315ce2042b442a3f10da5c7f56c249 | 897d82d4953ed7b609746a0f252f3f3440b650cb | /day17/homework/homework_personal.py | a6297f4d42168de99f07f4198453d7482d5ba418 | [] | no_license | haiou90/aid_python_core | dd704e528a326028290a2c18f215b1fd399981bc | bd4c7a20950cf7e22e8e05bbc42cb3b3fdbe82a1 | refs/heads/master | 2022-11-26T19:13:36.721238 | 2020-08-07T15:05:17 | 2020-08-07T15:05:17 | 285,857,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | from iterable_tools import IterableHelper
"""直接使用IterableHelper类现有功能,完成下列需求
-- 在技能列表中查找名称是"一阳指"的技能对象
-- 在技能列表中查找攻击比例atk_rate大于1的所有技能对象
-- 在技能列表中所有技能名称name和消耗法力cost_sp"""
class Skill:
def __init__(self, name="", atk_rate=0.0, cost_sp=0, duration=0):
self.name = name
self.atk_rate = atk_rate
self.cost_sp = cost_sp
self.duration = duration
list_skills = [
Skill("横扫千军", 1, 50, 5),
Skill("九阳神功", 3, 150, 6),
Skill("降龙十八掌", 3, 150, 5),
Skill("一阳指", 1.2, 0, 2),
Skill("乾坤大挪移", 3.2, 30, 2),
Skill("打狗棍", 1.3, 0, 6),
]
result = IterableHelper.find_single(list_skills,lambda emp:emp.name == "一阳指")
print(result.__dict__)
for list_skill in IterableHelper.find_all(list_skills,lambda emp:emp.atk_rate>1):
print(list_skill.__dict__)
for list_skill in IterableHelper.select(list_skills,lambda emp:(emp.name,emp.atk_rate)):
print(list_skill)
class IterableHelper:
@staticmethod
def find_single(iterable,func):
for item in iterable:
if func(item):
return item
| [
"caoho@outlook.com"
] | caoho@outlook.com |
82e273899b8bdc46203d6b1f1e2254b20264b9a2 | f497916365288386bd2fc5085ce1391aa649467b | /pactools/utils/fir.py | eb7ec457d6bb77f43deaf0c55855dcc4adba5c82 | [] | no_license | fraimondo/pactools | 486d5eac4fd9190dcbbcee24fa735ac511aa396b | b4be8ae27cca1684816772f7ce2cb9e503452f14 | refs/heads/master | 2021-08-30T01:02:51.161092 | 2017-12-05T16:19:05 | 2017-12-05T16:19:05 | 113,920,577 | 0 | 0 | null | 2017-12-11T23:40:13 | 2017-12-11T23:40:12 | null | UTF-8 | Python | false | false | 8,162 | py | import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
from .spectrum import Spectrum
class FIR(object):
"""FIR filter
Parameters
----------
fir : array
Finite impulse response (FIR) filter
Examples
--------
>>> from pactools.utils.fir import FIR
>>> f = FIR(fir=[0.2, 0.6, 0.2])
>>> f.plot()
>>> signal_out = f.transform(signal_in)
"""
def __init__(self, fir=np.ones(1), fs=1.0):
self.fir = fir
self.fs = fs
def transform(self, sigin):
"""Apply this filter to a signal
Parameters
----------
sigin : array, shape (n_points, ) or (n_signals, n_points)
Input signal
Returns
-------
filtered : array, shape (n_points, ) or (n_signals, n_points)
Filtered signal
"""
sigin_ndim = sigin.ndim
sigin = np.atleast_2d(sigin)
filtered = [signal.fftconvolve(sig, self.fir, 'same') for sig in sigin]
if sigin_ndim == 1:
filtered = filtered[0]
else:
filtered = np.asarray(filtered)
return filtered
def plot(self, axs=None, fscale='log'):
"""
Plots the impulse response and the transfer function of the filter.
"""
# validate figure
fig_passed = axs is not None
if axs is None:
fig, axs = plt.subplots(nrows=2)
else:
axs = np.atleast_1d(axs)
if np.any([not isinstance(ax, plt.Axes) for ax in axs]):
raise TypeError('axs must be a list of matplotlib Axes, got {}'
' instead.'.format(type(axs)))
# test if is figure and has 2 axes
if len(axs) < 2:
raise ValueError('Passed figure must have at least two axes'
', given figure has {}.'.format(len(axs)))
fig = axs[0].figure
# compute periodogram
fft_length = max(int(2 ** np.ceil(np.log2(self.fir.shape[0]))), 2048)
s = Spectrum(fft_length=fft_length, block_length=self.fir.size,
step=None, fs=self.fs, wfunc=np.ones, donorm=False)
s.periodogram(self.fir)
s.plot('Transfer function of FIR filter', fscale=fscale,
axes=axs[0])
# plots
axs[1].plot(self.fir)
axs[1].set_title('Impulse response of FIR filter')
axs[1].set_xlabel('Samples')
axs[1].set_ylabel('Amplitude')
if not fig_passed:
fig.tight_layout()
return fig
class BandPassFilter(FIR):
    """Band-pass FIR filter
    Designs a band-pass FIR filter centered on frequency fc: a Blackman
    window modulating a cosine carrier at fc.
    Parameters
    ----------
    fs : float
        Sampling frequency
    fc : float
        Center frequency of the bandpass filter
    n_cycles : float or None, (default 7.0)
        Number of oscillation in the wavelet. None if bandwidth is used.
    bandwidth : float or None, (default None)
        Bandwidth of the FIR wavelet filter. None if n_cycles is used.
    zero_mean : boolean, (default True)
        If True, the mean of the FIR is subtracted, i.e. fir.sum() = 0.
    extract_complex : boolean, (default False)
        If True, the wavelet filter is complex and ``transform`` returns two
        signals, filtered with the real and the imaginary part of the filter.
    Examples
    --------
    >>> from pactools.utils import BandPassFilter
    >>> f = BandPassFilter(fs=100., fc=5., bandwidth=1., n_cycles=None)
    >>> f.plot()
    >>> signal_out = f.transform(signal_in)
    """
    def __init__(self, fs, fc, n_cycles=7.0, bandwidth=None, zero_mean=True,
                 extract_complex=False):
        self.fc = fc
        self.fs = fs
        self.n_cycles = n_cycles
        self.bandwidth = bandwidth
        self.zero_mean = zero_mean
        self.extract_complex = extract_complex
        # design eagerly: sets self.fir (and self.fir_imag if extract_complex)
        self._design()
    def _design(self):
        """Design the FIR: a Blackman-windowed cosine at the center frequency.

        Sets ``self.fir`` (and ``self.fir_imag`` when ``extract_complex``).
        """
        # the length of the filter
        order = self._get_order()
        half_order = (order - 1) // 2
        # Blackman window tapering the carrier
        w = np.blackman(order)
        # symmetric time axis centered on zero -> symmetric (zero-phase) taps
        t = np.linspace(-half_order, half_order, order)
        phase = (2.0 * np.pi * self.fc / self.fs) * t
        car = np.cos(phase)
        fir = w * car
        # the filter must be symmetric, in order to be zero-phase
        assert np.all(np.abs(fir - fir[::-1]) < 1e-15)
        # remove the constant component by forcing fir.sum() = 0
        if self.zero_mean:
            fir -= fir.sum() / order
        # normalize by the projection on the carrier (response at fc)
        gain = np.sum(fir * car)
        self.fir = fir * (1.0 / gain)
        # add the imaginary part to have a complex wavelet
        if self.extract_complex:
            car_imag = np.sin(phase)
            fir_imag = w * car_imag
            # same gain as the real part keeps both components on one scale
            self.fir_imag = fir_imag * (1.0 / gain)
        return self
    def _get_order(self):
        # Two mutually exclusive ways to choose the filter length:
        if self.bandwidth is None and self.n_cycles is not None:
            # span n_cycles periods of the center frequency fc
            half_order = int(float(self.n_cycles) / self.fc * self.fs / 2)
        elif self.bandwidth is not None and self.n_cycles is None:
            # length derived from the requested bandwidth; 1.65 appears to be
            # an empirical factor for the Blackman window -- TODO confirm
            half_order = int(1.65 * self.fs / self.bandwidth) // 2
        else:
            raise ValueError('fir.BandPassFilter: n_cycles and bandwidth '
                             'cannot be both None, or both not None. Got '
                             '%s and %s' % (self.n_cycles, self.bandwidth, ))
        # odd length keeps the filter symmetric around a center tap
        order = half_order * 2 + 1
        return order
    def transform(self, sigin):
        """Apply this filter to a signal
        Parameters
        ----------
        sigin : array, shape (n_points, ) or (n_signals, n_points)
            Input signal
        Returns
        -------
        filtered : array, shape (n_points, ) or (n_signals, n_points)
            Filtered signal
        (filtered_imag) : array, shape (n_points, ) or (n_signals, n_points)
            Only when extract_complex is true.
            Filtered signal with the imaginary part of the filter
        """
        filtered = super(BandPassFilter, self).transform(sigin)
        if self.extract_complex:
            # reuse the generic FIR machinery with the imaginary-part taps
            fir = FIR(fir=self.fir_imag, fs=self.fs)
            filtered_imag = fir.transform(sigin)
            return filtered, filtered_imag
        else:
            return filtered
    def plot(self, axs=None, fscale='log'):
        """
        Plots the impulse response and the transfer function of the filter.
        When extract_complex is true, the imaginary-part filter is overlaid
        on the same axes.
        """
        fig = super(BandPassFilter, self).plot(axs=axs, fscale=fscale)
        if self.extract_complex:
            if axs is None:
                axs = fig.axes
            fir = FIR(fir=self.fir_imag, fs=self.fs)
            fir.plot(axs=axs, fscale=fscale)
        return fig
class LowPassFilter(FIR):
    """Low-pass FIR filter built with a Kaiser window.

    Parameters
    ----------
    fs : float
        Sampling frequency.
    fc : float
        Cut-off frequency of the low-pass filter.
    bandwidth : float
        Width of the transition band of the filter.
    ripple_db : float (default 60.0)
        Positive number specifying maximum ripple in passband (dB) and
        minimum ripple in stopband of the Kaiser-window low-pass FIR filter.

    Examples
    --------
    >>> from pactools.utils import LowPassFilter
    >>> f = LowPassFilter(fs=100., fc=5., bandwidth=1.)
    >>> f.plot()
    >>> signal_out = f.transform(signal_in)
    """

    def __init__(self, fs, fc, bandwidth, ripple_db=60.0):
        self.fs = fs
        self.fc = fc
        self.bandwidth = bandwidth
        self.ripple_db = ripple_db
        # design eagerly so self.fir is ready for transform/plot
        self._design()

    def _design(self):
        """Build the Kaiser-window low-pass FIR and store it in ``self.fir``."""
        # Transition width normalized so that Nyquist == 1.0, as kaiserord
        # expects, then derive the number of taps and the Kaiser beta.
        numtaps, beta = signal.kaiserord(self.ripple_db,
                                         self.bandwidth / self.fs * 2)
        # Low-pass prototype with the cut-off normalized the same way.
        taps = signal.firwin(numtaps, self.fc / self.fs * 2,
                             window=('kaiser', beta))
        # A symmetric FIR guarantees the filter is zero-phase.
        assert np.all(np.abs(taps - taps[::-1]) < 1e-15)
        # Normalize to unit DC gain.
        self.fir = taps / np.sum(taps)
        return self
| [
"tom.dupre-la-tour@m4x.org"
] | tom.dupre-la-tour@m4x.org |
6fa3358d03e945e11123f1d3a3c4b23061069cf8 | 3577d2e20c79cbbbc0a8a91a73be322be61cf384 | /5.4 Objects and Algorithms/1 Object/4 ExerciseTraker2.py | ac6399889c32350a3735b2e6358e11bc1c49284d | [] | no_license | KETULPADARIYA/Computing-in-Python | cb30a807fa92a816f53f3254a63f07883977406d | 02c69a3c074924a70f68f00fd756436aa207dcf6 | refs/heads/master | 2020-04-24T04:47:18.342215 | 2019-03-17T03:21:46 | 2019-03-17T03:21:46 | 171,716,015 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | #Previously, you wrote a class called ExerciseSession that
#had three attributes: an exercise name, an intensity, and a
#duration.
#
#Add a new method to that class called calories_burned.
#calories_burned should have no parameters (besides self, as
#every method in a class has). It should return an integer
#representing the number of calories burned according to the
#following formula:
#
# - If the intensity is "Low", 4 calories are burned per
# minute.
# - If the intensity is "Moderate", 8 calories are burned
# per minute.
# - If the intensity is "High", 12 calories are burned per
# minute.
#
#You may copy your class from the previous exercise and just
#add to it.
#Add your code here!
class ExerciseSession():
    """One tracked workout: an exercise name, an intensity and a duration."""

    # Calories burned per minute for each supported intensity level.
    _CALORIES_PER_MINUTE = {"Low": 4, "Moderate": 8, "High": 12}

    def __init__(self, exercise, intensity, duration):
        self.exercise = exercise
        self.intensity = intensity
        self.duration = duration

    def get_exercise(self):
        """Return the exercise name."""
        return self.exercise

    def get_intensity(self):
        """Return the intensity level ("Low", "Moderate" or "High")."""
        return self.intensity

    def get_duration(self):
        """Return the duration in minutes."""
        return self.duration

    def set_exercise(self, value):
        """Replace the exercise name."""
        self.exercise = value

    def set_intensity(self, value):
        """Replace the intensity level."""
        self.intensity = value

    def set_duration(self, value):
        """Replace the duration in minutes."""
        self.duration = value

    def calories_burned(self):
        """Return total calories burned, or None for an unknown intensity."""
        per_minute = self._CALORIES_PER_MINUTE.get(self.intensity)
        return None if per_minute is None else self.duration * per_minute
# If your code is implemented correctly, the lines below will run
# error-free and print the following to the console:
#   240   (Running, Low intensity, 60 min -> 60 * 4)
#   360   (Swimming, High intensity, 30 min -> 30 * 12)
new_exercise = ExerciseSession("Running", "Low", 60)
print(new_exercise.calories_burned())
# Mutate the same session through the setters, then re-check the total.
new_exercise.set_exercise("Swimming")
new_exercise.set_intensity("High")
new_exercise.set_duration(30)
print(new_exercise.calories_burned())
| [
"ketulpadariya79@gmail.com"
] | ketulpadariya79@gmail.com |
3419e878d3d631a1d81f03dea1450003ab85c1bb | f39528e9bad8cfa78b38fcbb7a5b430ac0c7a942 | /Higgs2LLP/LO_HToSSTobbbb_MH125_MS55_ctauS0p05_13TeV.py | 0a392bd289634904035c154b54547bec66ebdeaf | [] | no_license | isildakbora/EXO-MC-REQUESTS | c0e3eb3a49b516476d37aa464c47304df14bed1e | 8771e32bbec079de787f7e5f11407e9e7ebe35d8 | refs/heads/master | 2021-04-12T11:11:03.982564 | 2019-04-29T15:12:34 | 2019-04-29T15:12:34 | 126,622,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from Configuration.Generator.Pythia8PowhegEmissionVetoSettings_cfi import *
from Configuration.Generator.PSweightsPythia.PythiaPSweightsSettings_cfi import *
# Pythia8 hadronizer for H(125) -> S S, with a long-lived dark scalar S
# (PDG id 9000006, mass 55 GeV, ctau = 0.05 mm) decaying to b bbar.
generator = cms.EDFilter("Pythia8HadronizerFilter",
                         maxEventsToPrint = cms.untracked.int32(1),
                         pythiaPylistVerbosity = cms.untracked.int32(1),
                         filterEfficiency = cms.untracked.double(1.0),
                         pythiaHepMCVerbosity = cms.untracked.bool(False),
                         comEnergy = cms.double(13000.),
                         PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CP5SettingsBlock,
        pythia8PSweightsSettingsBlock,
        pythia8PowhegEmissionVetoSettingsBlock,
        processParameters = cms.vstring(
            # define the scalar S: mass 55 GeV, width 3.9464e-12 GeV
            # (sets the proper lifetime), allowed mass window 1.0-75.0 GeV
            '9000006:all = sk skbar 0 0 0 55 3.9464e-12 1.0 75.0 0.05',
            '9000006:oneChannel = 1 1.0 101 5 -5',
            '9000006:mayDecay = on',
            '9000006:isResonance = on',
            '25:m0 = 125.0',
            # force the Higgs to decay exclusively to the S pair
            '25:onMode = off',
            '25:addChannel = 1 0.000000001 101 9000006 -9000006',
            '25:onIfMatch = 9000006 -9000006',
            '9000006:onMode = off',
            '9000006:onIfAny = 5',
        ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CP5Settings',
                                    # BUG FIX: a missing trailing comma here made Python
                                    # concatenate this string with the next one, silently
                                    # dropping the Powheg emission-veto settings.
                                    'pythia8PSweightsSettings',
                                    'pythia8PowhegEmissionVetoSettings',
                                    'processParameters'
                                    )
    )
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"bora.isildak@cern.ch"
] | bora.isildak@cern.ch |
701a3b7f4b23d31ebeab4b41f626f756478f746b | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/storagesync/v20190301/get_storage_sync_service.py | 2cf11b6677bb8a2310b698ed26983f397eb033f8 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,861 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetStorageSyncServiceResult',
'AwaitableGetStorageSyncServiceResult',
'get_storage_sync_service',
]
@pulumi.output_type
class GetStorageSyncServiceResult:
"""
Storage Sync Service object.
"""
def __init__(__self__, location=None, name=None, storage_sync_service_status=None, storage_sync_service_uid=None, tags=None, type=None):
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if storage_sync_service_status and not isinstance(storage_sync_service_status, int):
raise TypeError("Expected argument 'storage_sync_service_status' to be a int")
pulumi.set(__self__, "storage_sync_service_status", storage_sync_service_status)
if storage_sync_service_uid and not isinstance(storage_sync_service_uid, str):
raise TypeError("Expected argument 'storage_sync_service_uid' to be a str")
pulumi.set(__self__, "storage_sync_service_uid", storage_sync_service_uid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="storageSyncServiceStatus")
def storage_sync_service_status(self) -> int:
"""
Storage Sync service status.
"""
return pulumi.get(self, "storage_sync_service_status")
@property
@pulumi.getter(name="storageSyncServiceUid")
def storage_sync_service_uid(self) -> str:
"""
Storage Sync service Uid
"""
return pulumi.get(self, "storage_sync_service_uid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
"""
return pulumi.get(self, "type")
class AwaitableGetStorageSyncServiceResult(GetStorageSyncServiceResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes __await__ a generator function so
        # instances can be used with `await`; the value is already available,
        # so a plain result object is returned without ever suspending.
        if False:
            yield self
        return GetStorageSyncServiceResult(
            location=self.location,
            name=self.name,
            storage_sync_service_status=self.storage_sync_service_status,
            storage_sync_service_uid=self.storage_sync_service_uid,
            tags=self.tags,
            type=self.type)
def get_storage_sync_service(resource_group_name: Optional[str] = None,
                             storage_sync_service_name: Optional[str] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStorageSyncServiceResult:
    """
    Use this data source to access information about an existing resource.

    Invokes the Azure NextGen data source
    `azure-nextgen:storagesync/v20190301:getStorageSyncService` and wraps the
    outputs in an awaitable result object.

    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str storage_sync_service_name: Name of Storage Sync Service resource.
    :param pulumi.InvokeOptions opts: Options controlling how the invoke is performed.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['storageSyncServiceName'] = storage_sync_service_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # default to this SDK's version so the matching provider plugin is used
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:storagesync/v20190301:getStorageSyncService', __args__, opts=opts, typ=GetStorageSyncServiceResult).value
    return AwaitableGetStorageSyncServiceResult(
        location=__ret__.location,
        name=__ret__.name,
        storage_sync_service_status=__ret__.storage_sync_service_status,
        storage_sync_service_uid=__ret__.storage_sync_service_uid,
        tags=__ret__.tags,
        type=__ret__.type)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
b2d9a6ad2e37068d9a8111e66a169c2f35cd83cf | cceb97ce3d74ac17090786bc65f7ed30e37ad929 | /lxd_Safety(out)/graphTraversal-submit2/kvparser.py | cf5a418fcf027d4e48d2a709fddd788aae180815 | [] | no_license | Catxiaobai/project | b47310efe498421cde794e289b4e753d843c8e40 | 76e346f69261433ccd146a3cbfa92b4e3864d916 | refs/heads/master | 2023-01-08T04:37:59.232492 | 2020-11-10T12:00:34 | 2020-11-10T12:00:34 | 291,014,545 | 1 | 4 | null | 2020-11-09T01:22:11 | 2020-08-28T10:08:16 | Python | UTF-8 | Python | false | false | 7,539 | py | """kvparser -- A simple hierarchical key-value text parser.
(c) 2004 HAS
kvparser parses text containing simple key-value pairs and/or nested blocks using a simple event-driven model. The text format is intended to be human readable and writeable, so is designed for simplicity and consistency with a low-noise syntax. The error handling for malformed text is strict by default - the text format is simple enough that it should not be hard to write valid text.
Example text:
#######
person:
first-name=Joe
last-name=Black
email:
nickname=joe.black
address=joe@foo.com
email:
nickname=fuzzy
address=fuzzy@bar.org
#######
1. Simple key-value pairs take the form:
NAME=VALUE
NAME must contain only alphanumeric and/or hyphen characters, and be at least 1 character in length with the first character being a letter. (Note: Periods are permitted in names as well. However, these should be used only to indicate ad-hoc namespaces, e.g. 'foo.bar' where 'bar' is an attribute of the 'foo' namespace.)
NAME and VALUE are separated by an '=' (equals) character. Whitespace before the '=' is not permitted. Everything after the '=' is the VALUE.
VALUE can contain any characters except newline, and may be 0 or more characters in length.
Each line must end in a newline (ASCII 10) character.
The Parser class provides backtick ('`') escaping for the following characters in VALUE (see Parser.escapeChar):
	`n --> newline character (ASCII 10)
	`r --> return character (ASCII 13)
	`t --> tab character (ASCII 9)
	`` --> `
2. Key-value blocks are indicated by the line:
NAME:
followed by zero or more lines containing simple key-value pairs and/or blocks indented with a single tab character (ASCII 9).
The colon must be followed immediately by a newline character; trailing whitespace and other characters is not allowed.
Blocks can be nested within other blocks to any depth.
3. Empty lines and lines containing only tabs are permitted; these are simply ignored.
4. Full-line comments are permitted; any line beginning with zero or more tabs followed by '#' is ignored.
5. The parser will, by default, raise a ParseError if it encounters a malformed key-value item or block, or an incorrectly indented line. This behaviour can be overridden in subclasses if desired.
#######
NOTES
- See parser classes and test code for more information and examples of use.
- The restricted NAME format ensures names can be directly mapped to C-style identifiers simply by substituting the hyphen with an underscore.
- kvparser doesn't [yet?] provide any special features for working with NAME namespaces.
"""
# kvparser -- A simple key-value text parser with support for nested blocks.
#
# Copyright (C) 2004 HAS <hamish.sanderson@virgin.net>
#
# This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# coding = utf-8
import re as _re
# A line carrying no data: only tabs, or only tabs followed by a '#' comment.
_emptyLinePattern = _re.compile('\t*$|\t*#')
# A data line: optional tab indentation, a NAME, then either ':' (block
# opener) or '=VALUE' (simple pair).  The NAME tail uses '*' rather than '+'
# so that single-character names are accepted, as the module docstring
# promises ("at least 1 character in length").
_parseLinePattern = _re.compile('(\t*)([.a-zA-Z][.a-zA-Z0-9-]*)(?:(:)|=(.*))')


class ParseError(Exception):
    """Raised (by default) when the input text violates the grammar."""

    def __init__(self, msg, lineNo, lineStr):
        self.msg = msg
        self.lineNo = lineNo
        self.lineStr = lineStr

    def __str__(self):
        return self.msg + ' at line %i: %r' % (self.lineNo, self.lineStr)


class Parser:
    """Subclass this and override parsing event handlers to do useful work."""
    lineDelimiter = '\n'
    escapeChar = '`'
    # Each entry is a two-character string: (escaped char, replacement).
    charSubs = 'r\r', 'n\n', 't\t'

    def unescape(self, s):
        """Unescape value part of a key=value pair."""
        # The doubled escape character is substituted last.
        for old, new in self.charSubs + (self.escapeChar * 2,):
            s = s.replace(self.escapeChar + old, new)
        return s

    # Main method; call it with the text to be parsed as its sole argument
    def parse(self, text):
        """Parse `text`, firing openBlock/addItem/closeBlock events."""
        # TO DO: change 'ParseError' usage to 'parseError' events so that
        # subclasses can provide their own error handling/recovery behaviour.
        lines = text.split(self.lineDelimiter)
        blockDepth = 0
        for lineNo, lineStr in enumerate(lines):
            if _emptyLinePattern.match(lineStr):
                continue  # blank or comment line
            lineMatch = _parseLinePattern.match(lineStr)
            if not lineMatch:
                self.parseError('Malformed line', lineNo, lineStr)
            indentStr, name, isBlock, value = lineMatch.groups()
            depth = len(indentStr)
            # a line may not be indented deeper than the open block count
            if depth > blockDepth:
                self.parseError('Bad indentation', lineNo, lineStr)
            # close blocks until we are back at this line's depth
            while depth < blockDepth:
                blockDepth -= 1
                self.closeBlock()
            if isBlock:
                self.openBlock(name)
                blockDepth += 1
            else:
                self.addItem(name, self.unescape(value))
        # close any blocks still open at end of input
        for _ in range(blockDepth):
            self.closeBlock()

    # Optionally override the following error event handler to provide your
    # own error handling:
    def parseError(self, desc, lineNo, lineStr):
        raise ParseError(desc, lineNo, lineStr)

    # Override the following parser event handlers:
    def openBlock(self, name):
        """Called when a 'NAME:' block opens."""
        pass

    def addItem(self, name, value):
        """Called for each NAME=VALUE pair; value is already unescaped."""
        pass

    def closeBlock(self):
        """Called when the innermost open block closes."""
        pass
class TestParser(Parser):
    """Debug parser that echoes every parse event to stdout."""

    # The original used Python-2-only `print` statements; the parenthesized
    # single-argument form below behaves identically under both Python 2
    # (print of a parenthesized expression) and Python 3 (function call).
    def openBlock(self, name):
        print('OPEN %r' % name)

    def addItem(self, name, value):
        print('ADD %r %r' % (name, value))

    def closeBlock(self):
        print('CLOSE')
#######
class ListParser(Parser):
    """Parse text into a nested list of (name, value) pairs.

    Simple pairs become ('name', 'value') tuples and blocks become
    ('name', [children...]) tuples, nested to any depth.  E.g. the text::

        foo=1
        bar:
        	baz=3

    produces::

        [('foo', '1'), ('bar', [('baz', '3')])]
    """

    class _Stack:
        # Minimal LIFO wrapper; each entry is a two-item tuple of
        # (block name, list of items in that block).
        def __init__(self, lst):
            self.__stack = lst

        def push(self, val):
            self.__stack.append(val)

        def pop(self):
            return self.__stack.pop()

        def top(self):
            return self.__stack[-1]

        def depth(self):
            return len(self.__stack)

    def parse(self, text):
        # A sentinel root entry collects the top-level items.
        self.stack = self._Stack([(None, [])])
        Parser.parse(self, text)
        result = self.stack.pop()[1]
        del self.stack
        return result

    def openBlock(self, name):
        self.stack.push((name, []))

    def addItem(self, name, value):
        self.stack.top()[1].append((name, value))

    def closeBlock(self):
        finished = self.stack.pop()
        self.stack.top()[1].append(finished)
#######
# TEST
if __name__ == '__main__':
    # Exercise both parsers on a small document using comments, nesting and
    # tab indentation (the literal below is tab-indented on purpose).
    s = """
# this is a comment line
email:
	address=user@domain
	real-name=Real Name
	encryption:
		format=PGP
		key=some key
connection:
	address=123.123.123.123
	port=99
	connection-type=INET
	address-type=IP4
"""
    # TestParser prints one line per event; ListParser returns nested lists.
    TestParser().parse(s)
    print
    print ListParser().parse(s)
| [
"2378960008@qq.com"
] | 2378960008@qq.com |
f9533c10696274ee3f113a9ade94819a015ebdda | 3c92c3f633b613a62fb67476fd617e1140133880 | /leetcode/27. Remove Element.py | 389200567c30dff692573da7bd67a5203e14e62b | [] | no_license | cuiy0006/Algorithms | 2787f36f8164ded5252a006f723b570c9091bee9 | 00fd1397b65c68a303fcf963db3e28cd35c1c003 | refs/heads/master | 2023-03-31T13:55:59.191857 | 2023-03-31T03:39:42 | 2023-03-31T03:39:42 | 75,001,651 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
i = 0
j = 0
while i < len(nums):
while i < len(nums) and nums[i] == val:
i += 1
if i == len(nums):
break
nums[j] = nums[i]
j += 1
i += 1
return j
class Solution:
    def removeElement(self, nums, val):
        """Remove every occurrence of val from nums in place.

        :type nums: List[int]
        :type val: int
        :rtype: int

        Uses two pointers closing in from both ends, swapping matching
        elements toward the tail; element order is not preserved.
        """
        if not nums:
            return 0
        left, right = 0, len(nums) - 1
        while left < right:
            # advance past elements that are already in place
            while left < right and nums[left] != val:
                left += 1
            # retreat past trailing elements that must be removed anyway
            while left < right and nums[right] == val:
                right -= 1
            nums[left], nums[right] = nums[right], nums[left]
        # the meeting element may itself still need to be counted
        return left + 1 if nums[left] != val else left
| [
"noreply@github.com"
] | cuiy0006.noreply@github.com |
629b3a5257059913a7da306eec068efe1e380de4 | bd8bc7abe0f774f84d8275c43b2b8c223d757865 | /705_DesignHashSet/MyHashSet.py | 06a999a2cec78ea54e5bd553bcec061ffea7135f | [
"MIT"
] | permissive | excaliburnan/SolutionsOnLeetcodeForZZW | bde33ab9aebe9c80d9f16f9a62df72d269c5e187 | 64018a9ead8731ef98d48ab3bbd9d1dd6410c6e7 | refs/heads/master | 2023-04-07T03:00:06.315574 | 2021-04-21T02:12:39 | 2021-04-21T02:12:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | class Bucket:
def __init__(self):
self.buckt = []
def add(self, key):
if key not in self.buckt:
self.buckt.append(key)
def remove(self, key):
for i, k in enumerate(self.buckt):
if k == key:
del self.buckt[i]
def contains(self, key):
return key in self.buckt
class MyHashSet:
    """Hash set for integers: a fixed table of Buckets chained per slot."""

    def __init__(self):
        """Initialize your data structure here."""
        # 2069 is the (prime) number of slots; keys map to slots modulo it.
        self.key_space = 2069
        self.hash_table = [Bucket() for _ in range(self.key_space)]

    def _bucket_of(self, key):
        # Locate the chain responsible for this key via modular hashing.
        return self.hash_table[key % self.key_space]

    def add(self, key: int) -> None:
        """Insert key into the set (duplicates are ignored)."""
        self._bucket_of(key).add(key)

    def remove(self, key: int) -> None:
        """Delete key from the set if present."""
        self._bucket_of(key).remove(key)

    def contains(self, key: int) -> bool:
        """
        Returns true if this set contains the specified element
        """
        return self._bucket_of(key).contains(key)
# Your MyHashSet object will be instantiated and called as such:
# obj = MyHashSet()
# obj.add(key)
# obj.remove(key)
# param_3 = obj.contains(key)
| [
"noreply@github.com"
] | excaliburnan.noreply@github.com |
f008a629a26cf50be0ba05f2ff12cea28da03c6d | 9188d0d7ce9fc5fadf4d2593741894e1448f9326 | /indico/vendor/django_mail/__init__.py | 9cee3d734ac462e5c0da565826c3884569db9e34 | [
"MIT"
] | permissive | vaclavstepan/indico | b411410416acdfa50b0d374f89ec8208de00fb2f | 8ca1ac4d4a958f22f24580a790b3cb015570bdfb | refs/heads/master | 2023-07-21T04:42:03.031131 | 2021-09-01T09:54:17 | 2021-09-01T09:54:17 | 385,897,420 | 0 | 0 | MIT | 2021-07-16T13:07:32 | 2021-07-14T10:16:57 | null | UTF-8 | Python | false | false | 1,359 | py | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
# TODO: Move this whole package into a standalone pypi package, since it's
# useful in general for anyoen who wants to send emails (without using django)
# The code in here is taken almost verbatim from `django.core.mail`,
# which is licensed under the three-clause BSD license and is originally
# available on the following URL:
# https://github.com/django/django/blob/stable/2.2.x/django/core/mail/__init__.py
# Credits of the original code go to the Django Software Foundation
# and their contributors.
"""
Tools for sending email.
"""
from flask import current_app
from .backends.base import BaseEmailBackend
from .module_loading_utils import import_string
__all__ = ['get_connection']
def get_connection(backend=None, fail_silently=False, **kwds) -> BaseEmailBackend:
    """Load an email backend and return an instance of it.

    ``backend`` is a dotted import path to the backend class; when not
    given (or falsy), the ``EMAIL_BACKEND`` entry of the active Flask
    application's config is used instead.  ``fail_silently`` and all other
    keyword arguments are forwarded to the backend's constructor.
    """
    backend_path = backend or current_app.config['EMAIL_BACKEND']
    backend_cls = import_string(backend_path)
    return backend_cls(fail_silently=fail_silently, **kwds)
| [
"adrian.moennich@cern.ch"
] | adrian.moennich@cern.ch |
7a98bd64abdb3d07cde81ab64491ac9725dcb528 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02936/s782768449.py | 56eea4ff0a74433df558677289cc44ba5326f3a0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | n, q = map(int, input().split())
Tree = [[] for _ in range(n+1)]
Counter = [0 for _ in range(n+1)]
AB = (tuple(map(int, input().split())) for _ in range(n-1))
PX = (tuple(map(int, input().split())) for _ in range(q))
for a,b in AB:
Tree[a].append(b)
Tree[b].append(a)
for p,x in PX:
Counter[p] += x
P = [-1]*(n+1)
nodes = [1]
while nodes:
parent = nodes.pop()
for node_i in Tree[parent]:
if P[parent]==node_i:
continue
P[node_i] = parent
nodes.append(node_i)
Counter[node_i] += Counter[parent]
print(*Counter[1:]) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.