blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
13d7db7aaca6f9e6a63202ca06de69f98a608bf5 | 002c14cd622b4890cce1c243065cebe39e2302ec | /CodingInterview2/24_ReverseList/reverse_list.py | 7299f43b46d7e486ee7a35d5b50c38ccae52c3da | [
"MIT"
] | permissive | hscspring/The-DataStructure-and-Algorithms | 6200eba031eac51b13e320e1fc9f204644933e00 | e704a92e091f2fdf5f27ec433e0e516ccc787ebb | refs/heads/master | 2022-08-29T18:47:52.378884 | 2022-08-25T16:22:44 | 2022-08-25T16:22:44 | 201,743,910 | 11 | 3 | MIT | 2021-04-20T18:28:47 | 2019-08-11T09:26:34 | Python | UTF-8 | Python | false | false | 1,502 | py | """
面试题 24:反转链表
题目:定义一个函数,输入一个链表的头结点,反转该链表并输出反转后链表的
头结点。
"""
class Node:
def __init__(self, val):
self.val = val
self.next = None
def list2link(lst):
root = Node(None)
ptr = root
for i in lst:
ptr.next = Node(i)
ptr = ptr.next
return root.next
def link2list(root: Node) -> list:
res = []
while root:
res.append(root.val)
root = root.next
return res
def reverse(link: Node) -> Node:
"""
Reverse a linklist
Parameters
-----------
link: Node
the given linklist
Returns
---------
out: Node
the revsersed linklist
Notes
------
"""
if not link:
return None
pre = None
while link:
val = link.val
node = Node(val)
node.next = pre
pre = node
link = link.next
return node
def reverse2(link: Node) -> Node:
pre = None
cur = link
while cur:
nxt = cur.next
cur.next = pre
pre = cur
cur = nxt
return pre
def reverse3(head: Node) -> Node:
if not head or not head.next:
return head
p = reverse3(head.next)
head.next.next = head
head.next = None
return p
if __name__ == '__main__':
link = list2link([1, 2])
rlink = reverse3(link)
print(rlink.val)
print(rlink.next)
# res = link2list(rlink)
# print(res)
| [
"haoshaochun@gmail.com"
] | haoshaochun@gmail.com |
9d003c26a99d9d7036d763b8aadcaa977689f9bc | fc24e89a0127acd8dac7d9357268e446cea66f59 | /app/__init__.py | 884fc9c3145181a34af1532607449ac33502e9e9 | [] | no_license | JamesMusyoka/Watch-list | 10e08d05a9ae5436629fcd8efcb320de8c943821 | 0070e66de152f77c828fc8eb08e25aa720cceac7 | refs/heads/master | 2022-12-24T02:04:02.825901 | 2020-10-06T20:17:55 | 2020-10-06T20:17:55 | 168,703,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | from flask import Flask
from flask_bootstrap import Bootstrap
from .config import DevConfig
# Initializing application
app = Flask(__name__,instance_relative_config = True)
# Setting up configuration
app.config.from_object(DevConfig)
app.config.from_pyfile("config.py")
# Initializing Flask Extensions
bootstrap = Bootstrap(app)
from app import views
from app import error | [
"jamesmu475@gmail.com"
] | jamesmu475@gmail.com |
1a354b4773439ab02e2f24b309f7819104ba2030 | 795854ea2d73a5da0114694cd0a232d1f4589b5a | /users/migrations/0003_delete_profile.py | 7d757713350d36a25ee1abccbecdb21f856c0a81 | [] | no_license | JEENUMINI/recipeclassbasedview | fe0ec63af261f53eb765064ab9ec82bb40d5a969 | ced076d71e6a7cbe9790270b7efa1cbc4a5390a8 | refs/heads/master | 2023-02-02T09:52:02.702351 | 2020-12-24T18:51:59 | 2020-12-24T18:51:59 | 323,711,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | # Generated by Django 3.1.3 on 2020-12-24 18:45
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the obsolete ``Profile`` model.
    dependencies = [
        ('users', '0002_profilemodel'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Profile',
        ),
    ]
| [
"mini13.1994@gmail.com"
] | mini13.1994@gmail.com |
65ebf528c07a7855bcfc0136dcc061fdaf19a5fc | fa841ab3564e0e0fd6065201846fb6f305c43719 | /installation/utils.py | c4c988bcfdf3973234bec2b36d1090821a0aadb1 | [] | no_license | suipnice/Jalon | dc008232baba6c1295cb8a6d6001147e22e03c2a | bc003d10ed15d6ecc5f15fdb3809e9dd53b568bd | refs/heads/master | 2021-01-08T05:46:55.757385 | 2016-06-13T11:58:31 | 2016-06-13T11:58:31 | 241,926,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,950 | py | from datetime import datetime
from DateTime.DateTime import DateTime
from dateutil.parser import parse as dateparse
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone import PloneLocalesMessageFactory as _locales
from Products.CMFPlone import PloneMessageFactory as _plone
from Products.Ploneboard.interfaces import IComment
from Products.Ploneboard.interfaces import IConversation
from zope.i18n import translate
import time
class defer(object):
    """Defer a function call until the object is rendered as a string.

    Useful for date components in translations: the wrapped call (e.g. a
    ``translate(...)``) only runs when the value is actually interpolated
    into output via ``str()``.
    """
    def __init__(self, func, *args, **kwargs):
        self.func, self.args, self.kwargs = func, args, kwargs
    def __str__(self):
        return self.func(*self.args, **self.kwargs)
def toPloneboardTime(context, request, time_=None):
    """Return ``time_`` formatted for Ploneboard display.

    Posts younger than one week render as a translated "<weekday> HH:MM";
    older posts use the translated ``old_date_format`` message (falling back
    to an English strftime format).  Returns the literal string
    'Unknown date' when ``time_`` is falsy, and ``None`` when unpacking the
    timetuple raises IndexError.

    NOTE: this code targets Python 2 (``unicode`` builtin, ``str.decode``).
    """
    ploneboard_time = None
    ts = getToolByName(context, 'translation_service')
    # fallback format, english (used for posts older than one week)
    old_format_en = '%B %d. %Y'
    if not time_:
        return 'Unknown date'
    # Accept a callable (e.g. an Archetypes accessor) as well as a value.
    if callable(time_):
        time_ = time_()
    try:
        # Normalize Zope DateTime / arbitrary string input to datetime.
        if isinstance(time_, DateTime):
            time_ = datetime.fromtimestamp(time_.timeTime())
        else:
            time_ = dateparse(str(time_))
        (year, month, day,
         hours, minutes, seconds, wday, _, dst) = time_.timetuple()
        # Month/weekday names are deferred so translation happens only when
        # the mapping value is actually interpolated.
        translated_date_elements = {
            'year': year,
            'month': unicode(
                defer(
                    translate,
                    _locales(ts.month_msgid(month)),
                    context=request
                )
            ),
            'day': day,
            'wday': unicode(
                defer(
                    translate,
                    # timetuple's Monday=0 convention -> locales' Sunday=0.
                    _locales(ts.day_msgid((wday + 1) % 7)),
                    context=request
                )
            ),
            'hours': "%02i" % hours,
            'minutes': "%02i" % minutes,
            'seconds': "%02i" % seconds
        }
        if time.time() - time.mktime(time_.timetuple()) < 604800:  # 60*60*24*7
            ploneboard_time = translate(
                '${wday} ${hours}:${minutes}',
                mapping=translated_date_elements,
                context=request
            )
        else:
            try:
                ploneboard_time = translate(
                    _plone(
                        'old_date_format: ${year} ${month} ${day} '
                        '${hours}:${minutes}',
                        default=unicode(
                            time_.strftime(old_format_en).decode('utf-8')
                        ),
                        mapping=translated_date_elements
                    ),
                    context=request
                )
            except:
                # Fallback when the strftime result cannot be decoded as
                # UTF-8: use the raw byte string as the default.
                ploneboard_time = translate(
                    _plone(
                        'old_date_format: ${year} ${month} ${day} '
                        '${hours}:${minutes}',
                        default=time_.strftime(old_format_en),
                        mapping=translated_date_elements
                    ),
                    context=request
                )
    except IndexError:
        # Malformed timetuple: give up and return None.
        pass
    return ploneboard_time
def getNumberOfComments(node, catalog=None):
    """Return how many comment objects are catalogued under ``node``."""
    if catalog is None:
        catalog = getToolByName(node, 'portal_catalog')
    node_path = '/'.join(node.getPhysicalPath())
    brains = catalog(
        object_provides=IComment.__identifier__,
        path=node_path)
    return len(brains)
def getNumberOfConversations(node, catalog=None):
    """Return how many conversation objects are catalogued under ``node``."""
    if catalog is None:
        catalog = getToolByName(node, 'portal_catalog')
    node_path = '/'.join(node.getPhysicalPath())
    brains = catalog(
        object_provides=IConversation.__identifier__,
        path=node_path)
    return len(brains)
| [
"bordonad@unice.fr"
] | bordonad@unice.fr |
569a3d223c7cb6f6f0df86ab3966d52d3518a40a | 159a2e75ff6cc7c0b58741c25636b83410e42bc7 | /数据结构与算法/合并排序merge_sort.py | 86619f34a4cd6b4984dd3097ff78ee7ec165ac3d | [] | no_license | articuly/python_study | e32ba6827e649773e5ccd953e35635aec92c2c15 | b7f23cdf3b74431f245fe30c9d73b4c6910b1067 | refs/heads/master | 2020-11-24T04:20:35.131925 | 2020-09-10T08:21:06 | 2020-09-10T08:21:06 | 227,961,859 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | # coding:utf-8
import random
def merge_sort(lst):
    """Return a new sorted list built from ``lst``.

    Despite its name this is a quicksort-style partition sort: the middle
    element is used as a pivot, the remaining elements are split into
    <= pivot / > pivot halves, and both halves are sorted recursively.
    A debug line showing each partition is printed, as in the original.
    """
    if len(lst) <= 1:
        return lst
    middle = len(lst) // 2
    pivot = lst[middle]
    left_lst = []
    right_lst = []
    # Single pass over every element except the pivot itself (the original
    # duplicated this loop for lst[:middle] and lst[middle+1:]).
    for index, value in enumerate(lst):
        if index == middle:
            continue
        if value <= pivot:
            left_lst.append(value)
        else:
            right_lst.append(value)
    print(left_lst, [pivot], right_lst)
    return merge_sort(left_lst) + [pivot] + merge_sort(right_lst)
# Demo: sort ten random integers in [1, 100] and show input vs. output.
lst = [random.randint(1, 100) for i in range(10)]
print('random list:', lst)
print('sorted list:', merge_sort(lst))
'''
每次分割成2部分,大概log2n次可以得到一个元素
合并次是O(n)
平均水平是O(nlog2n)
由于使用了递归和两个列表,所以以空间复杂度增大O(n)+O(logn)
'''
| [
"articuly@gmail.com"
] | articuly@gmail.com |
6a44cc3eece0be757f14a78b85ad67a7506e8ed1 | e53ed2cc0babec5c52ad4b8691ff32c74a57f019 | /prototype/wsgi/django.wsgi | 959df017cf49fb95052a0e46d17db55149fdfbb8 | [
"MIT",
"CC-BY-4.0"
] | permissive | NYPL/gazetteer | bf28f12c53b896acc1f805175106ccb42a71cbe1 | 708035e8d2299e70a6d3cecce40970242673426c | refs/heads/master | 2016-09-05T18:21:15.027147 | 2015-02-03T19:54:32 | 2015-02-03T19:54:32 | 18,560,844 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 768 | wsgi | # django.wsgi for gazetteer
import os
import sys
import site
project_module = 'prototype'
# Project root: two directories above this WSGI file.
root_dir = os.path.normpath(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
#using virtualenv's activate_this.py to reorder sys.path
# NOTE: execfile() exists only in Python 2 -- this script targets Python 2.
activate_this = os.path.join(root_dir, 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
sys.path.append(root_dir)
sys.path.append(os.path.join(root_dir, project_module))
#reload if this django.wsgi gets touched
from ox.django import monitor
monitor.start(interval=1.0)
monitor.track(os.path.abspath(os.path.dirname(__file__)))
# Point Django at the project's settings module before creating the handler.
os.environ['DJANGO_SETTINGS_MODULE'] = project_module + '.settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
| [
"b@pad.ma"
] | b@pad.ma |
f2a7b25271e5666777304dc9ae8ea519aeea7e14 | 64ed77abebb2ebab52ac0668c1639bfd79ec05d1 | /uploads/urls.py | 00c90287a6fc76f22881bbffa111853e3ef6032e | [
"MIT"
] | permissive | tiagocordeiro/estudio-abc | 731058582ee8d76366b9208687bbb906dcbca680 | 1f48d785063717ddc3b17b32df342b9eec7dbd32 | refs/heads/master | 2023-09-01T17:51:12.449297 | 2021-05-07T00:06:21 | 2021-05-13T17:16:15 | 140,905,450 | 0 | 0 | MIT | 2023-09-11T17:53:56 | 2018-07-14T01:08:22 | HTML | UTF-8 | Python | false | false | 191 | py | from django.urls import path
from . import views
# Routes for the uploads app: the upload form and the uploaded-file listing.
urlpatterns = [
    path('upload/', views.model_form_upload, name='uploads'),
    path('list-files/', views.arquivos, name='list_files'),
]
| [
"tiago@mulhergorila.com"
] | tiago@mulhergorila.com |
78e7cd590afac746961274a42957f2802356d876 | 9b2eb0d6b673ac4945f9698c31840b847f790a58 | /pkg/apteco_api/models/version_details.py | 1d091491980802ad0a985957a993b65b589cc0e1 | [
"Apache-2.0"
] | permissive | Apteco/apteco-api | 6d21c9f16e58357da9ce64bac52f1d2403b36b7c | e8cf50a9cb01b044897025c74d88c37ad1612d31 | refs/heads/master | 2023-07-10T23:25:59.000038 | 2023-07-07T14:52:29 | 2023-07-07T14:52:29 | 225,371,142 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,558 | py | # coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: support@apteco.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from apteco_api.configuration import Configuration
class VersionDetails(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each model attribute to its declared OpenAPI type.
    openapi_types = {
        'version': 'str'
    }
    # Maps each model attribute to the JSON key used on the wire.
    attribute_map = {
        'version': 'version'
    }
    def __init__(self, version=None, local_vars_configuration=None):  # noqa: E501
        """VersionDetails - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._version = None
        self.discriminator = None
        # Assign through the property so the setter's validation runs.
        self.version = version
    @property
    def version(self):
        """Gets the version of this VersionDetails.  # noqa: E501
        The version of the API  # noqa: E501
        :return: The version of this VersionDetails.  # noqa: E501
        :rtype: str
        """
        return self._version
    @version.setter
    def version(self, version):
        """Sets the version of this VersionDetails.
        The version of the API  # noqa: E501
        :param version: The version of this VersionDetails.  # noqa: E501
        :type: str
        """
        # ``version`` is required: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and version is None:  # noqa: E501
            raise ValueError("Invalid value for `version`, must not be `None`")  # noqa: E501
        self._version = version
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize generated models held in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize generated models held as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, VersionDetails):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, VersionDetails):
            return True
        return self.to_dict() != other.to_dict()
| [
"tim.morris@apteco.com"
] | tim.morris@apteco.com |
d0effd8a6732f9e7257665ad5ac55ee41da609ef | 951a84f6fafa763ba74dc0ad6847aaf90f76023c | /P3/Solu5062__1167.py | edc270ced15fa3f1fbe7a74d67cd402e0023caf4 | [] | no_license | SakuraGo/leetcodepython3 | 37258531f1994336151f8b5c8aec5139f1ba79f8 | 8cedddb997f4fb6048b53384ac014d933b6967ac | refs/heads/master | 2020-09-27T15:55:28.353433 | 2020-02-15T12:00:02 | 2020-02-15T12:00:02 | 226,550,406 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | from typing import List
import heapq
class Solution:
def connectSticks(self, sticks: List[int]) -> int:
if len(sticks) == 1:
return sticks[0]
sum = 0
heapq.heapify(sticks)
while len(sticks)>1:
#
num0 = heapq.heappop(sticks)
num1 = heapq.heappop(sticks)
sum += (num0+num1)
heapq.heappush(sticks,(num0+num1))
heapq._siftup(sticks,len(sticks)-1)
# print(sticks)
return sum
#
# lis = [5,1,2,3,3,3,5]
# heapq.heapify(lis)
# print(lis)
res = Solution().connectSticks([8,1,3,5])
print(res)
# print(1397754 - 1363767) | [
"452681917@qq.com"
] | 452681917@qq.com |
a6f2869dde01b317767cd8871554c14f5a4bf8a1 | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /web/controllers/pivot.py | 5ad07b67ca58fc54e4afa78116fb11a33198b10d | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,577 | py | # -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from collections import deque
import json
from odoo import http
from odoo.http import request
from odoo.tools import ustr
from odoo.tools.misc import xlwt
class TableExporter(http.Controller):
    # HTTP endpoints used by the web client's pivot view to export the
    # rendered pivot table as an Excel (.xls) spreadsheet via xlwt.
    @http.route('/web/pivot/check_xlwt', type='json', auth='none')
    def check_xlwt(self):
        # Report whether the optional xlwt dependency is available; xlwt is
        # imported from odoo.tools.misc and is presumably None when the
        # library is not installed -- hence the None check.
        return xlwt is not None
    @http.route('/web/pivot/export_xls', type='http', auth="user")
    def export_xls(self, data, token):
        # ``data`` is a JSON-serialized description of the pivot table
        # (nbr_measures, title, headers, measure_row, rows) built by the
        # client; ``token`` is echoed back in the 'fileToken' cookie.
        jdata = json.loads(data)
        nbr_measures = jdata['nbr_measures']
        workbook = xlwt.Workbook()
        worksheet = workbook.add_sheet(jdata['title'])
        header_bold = xlwt.easyxf("font: bold on; pattern: pattern solid, fore_colour gray25;")
        header_plain = xlwt.easyxf("pattern: pattern solid, fore_colour gray25;")
        bold = xlwt.easyxf("font: bold on;")
        # Step 1: writing headers
        headers = jdata['headers']
        # x,y: current coordinates
        # carry: queue containing cell information when a cell has a >= 2 height
        # and the drawing code needs to add empty cells below
        x, y, carry = 1, 0, deque()
        for i, header_row in enumerate(headers):
            worksheet.write(i, 0, '', header_plain)
            for header in header_row:
                # Flush pending multi-row cells that start at this column.
                while (carry and carry[0]['x'] == x):
                    cell = carry.popleft()
                    # NOTE(review): this inner ``i`` shadows the enumerate
                    # index; harmless because the outer ``i`` is only read at
                    # the top of each outer iteration, before these loops.
                    for i in range(nbr_measures):
                        worksheet.write(y, x+i, '', header_plain)
                    if cell['height'] > 1:
                        carry.append({'x': x, 'height': cell['height'] - 1})
                    x = x + nbr_measures
                style = header_plain if 'expanded' in header else header_bold
                for i in range(header['width']):
                    worksheet.write(y, x + i, header['title'] if i == 0 else '', style)
                if header['height'] > 1:
                    carry.append({'x': x, 'height': header['height'] - 1})
                x = x + header['width']
            # Flush cells carried past the last header of this row.
            while (carry and carry[0]['x'] == x):
                cell = carry.popleft()
                for i in range(nbr_measures):
                    worksheet.write(y, x+i, '', header_plain)
                if cell['height'] > 1:
                    carry.append({'x': x, 'height': cell['height'] - 1})
                x = x + nbr_measures
            x, y = 1, y + 1
        # Step 2: measure row
        if nbr_measures > 1:
            worksheet.write(y, 0, '', header_plain)
            for measure in jdata['measure_row']:
                style = header_bold if measure['is_bold'] else header_plain
                worksheet.write(y, x, measure['measure'], style)
                x = x + 1
            y = y + 1
        # Step 3: writing data
        x = 0
        for row in jdata['rows']:
            # Indent the row title to reflect its depth in the groupby tree.
            worksheet.write(y, x, row['indent'] * ' ' + ustr(row['title']), header_plain)
            for cell in row['values']:
                x = x + 1
                if cell.get('is_bold', False):
                    worksheet.write(y, x, cell['value'], bold)
                else:
                    worksheet.write(y, x, cell['value'])
            x, y = 0, y + 1
        # Stream the workbook back as an attachment download.
        response = request.make_response(None,
            headers=[('Content-Type', 'application/vnd.ms-excel'),
                     ('Content-Disposition', 'attachment; filename=table.xls')],
            cookies={'fileToken': token})
        workbook.save(response.stream)
        return response
| [
"50145400+gilbertp7@users.noreply.github.com"
] | 50145400+gilbertp7@users.noreply.github.com |
9a2c8288066e8396de41672a53e666d43d65a8bf | 791da33d91836572ab0ffb9042468586d28527ef | /initiatives/migrations/0017_auto_20200308_1231.py | f32b1c902cb93516f84d32db058c20f3c1fee847 | [] | no_license | Akash12740/nirmaan_bits | 1de8a9b478e99c09c74f5f0b54d6b3b143a79800 | 95054e03e56a3a8a60fa3178c8cf4383df4611a2 | refs/heads/master | 2023-06-15T21:13:51.874238 | 2021-07-06T13:48:32 | 2021-07-06T13:48:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | # Generated by Django 2.2 on 2020-03-08 07:01
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated migration altering three fields.
    dependencies = [
        ('initiatives', '0016_auto_20200308_1157'),
    ]
    operations = [
        # NOTE(review): the datetime defaults below are literal timestamps
        # captured when makemigrations ran -- new rows will default to that
        # fixed moment, not to the current time.
        migrations.AlterField(
            model_name='initiative',
            name='date_started',
            field=models.DateField(default=datetime.datetime(2020, 3, 8, 7, 1, 16, 289299, tzinfo=utc), verbose_name='Start Date'),
        ),
        migrations.AlterField(
            model_name='initiativecomment',
            name='message',
            field=models.CharField(default='', max_length=255, verbose_name=''),
        ),
        migrations.AlterField(
            model_name='initiativecomment',
            name='published_on',
            field=models.DateField(default=datetime.datetime(2020, 3, 8, 7, 1, 16, 320315, tzinfo=utc)),
        ),
    ]
| [
"f20190029@pilani.bits-pilani.ac.in"
] | f20190029@pilani.bits-pilani.ac.in |
b300aeadf3c9fa85e1db1e5b8c130ba3ae345d0d | 974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184 | /sdk/cosmos/azure-cosmos/samples/access_cosmos_with_aad_async.py | a24498996df52805eec1b9b4ef8693957c9656bb | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | gaoyp830/azure-sdk-for-python | 4816f04c554dcffb7510a6b7044b0c86a2dd32e1 | 1c66defa502b754abcc9e5afa444ca03c609342f | refs/heads/master | 2022-10-20T21:33:44.281041 | 2022-09-29T17:03:13 | 2022-09-29T17:03:13 | 250,355,505 | 0 | 0 | MIT | 2020-03-26T19:42:13 | 2020-03-26T19:42:12 | null | UTF-8 | Python | false | false | 5,282 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from azure.cosmos.aio import CosmosClient
import azure.cosmos.exceptions as exceptions
from azure.cosmos.partition_key import PartitionKey
from azure.identity.aio import ClientSecretCredential, DefaultAzureCredential
import config
import asyncio
# ----------------------------------------------------------------------------------------------------------
# Prerequisites -
#
# 1. An Azure Cosmos account -
# https://docs.microsoft.com/azure/cosmos-db/create-sql-api-python#create-a-database-account
#
# 2. Microsoft Azure Cosmos
# pip install azure-cosmos>=4.3.0b4
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates how to authenticate and use your database account using AAD credentials
# Read more about operations allowed for this authorization method: https://aka.ms/cosmos-native-rbac
# ----------------------------------------------------------------------------------------------------------
# Note:
# This sample creates a Container to your database account.
# Each time a Container is created the account will be billed for 1 hour of usage based on
# the provisioned throughput (RU/s) of that account.
# ----------------------------------------------------------------------------------------------------------
# <configureConnectivity>
HOST = config.settings["host"]
MASTER_KEY = config.settings["master_key"]
TENANT_ID = config.settings["tenant_id"]
CLIENT_ID = config.settings["client_id"]
CLIENT_SECRET = config.settings["client_secret"]
DATABASE_ID = config.settings["database_id"]
CONTAINER_ID = config.settings["container_id"]
PARTITION_KEY = PartitionKey(path="/id")
def get_test_item(num):
    """Build a small deterministic test document whose id embeds ``num``."""
    return {
        'id': 'Item_' + str(num),
        'test_object': True,
        'lastName': 'Smith'
    }
async def create_sample_resources():
    # Resource management requires the master key, so a key-authenticated
    # client (not the AAD one) creates the database and container.
    print("creating sample resources")
    async with CosmosClient(HOST, MASTER_KEY) as client:
        db = await client.create_database(DATABASE_ID)
        await db.create_container(id=CONTAINER_ID, partition_key=PARTITION_KEY)
async def delete_sample_resources():
    # Clean-up also uses the master key client: deleting a database is a
    # management operation.
    print("deleting sample resources")
    async with CosmosClient(HOST, MASTER_KEY) as client:
        await client.delete_database(DATABASE_ID)
async def run_sample():
    """Authenticate with AAD credentials and exercise data-plane CRUD.

    Management operations (create/delete database and container) still go
    through the master-key client, since the AAD data-plane role is not
    allowed to perform them (see the 403 assertion below).
    """
    # Since Azure Cosmos DB data plane SDK does not cover management operations, we have to create our resources
    # with a master key authenticated client for this sample.
    await create_sample_resources()
    # With this done, you can use your AAD service principal id and secret to create your ClientSecretCredential.
    # The async ClientSecretCredentials, like the async client, also have a context manager,
    # and as such should be used with the `async with` keywords.
    async with ClientSecretCredential(
            tenant_id=TENANT_ID,
            client_id=CLIENT_ID,
            client_secret=CLIENT_SECRET) as aad_credentials:
        # Use your credentials to authenticate your client.
        async with CosmosClient(HOST, aad_credentials) as aad_client:
            print("Showed ClientSecretCredential, now showing DefaultAzureCredential")
    # You can also utilize DefaultAzureCredential rather than directly passing in the id's and secrets.
    # This is the recommended method of authentication, and uses environment variables rather than in-code strings.
    async with DefaultAzureCredential() as aad_credentials:
        # Use your credentials to authenticate your client.
        async with CosmosClient(HOST, aad_credentials) as aad_client:
            # Do any R/W data operations with your authorized AAD client.
            db = aad_client.get_database_client(DATABASE_ID)
            container = db.get_container_client(CONTAINER_ID)
            # BUGFIX: in the async client, read()/read_item() are coroutines
            # and must be awaited; the original printed coroutine objects and
            # the calls never actually ran.
            print("Container info: " + str(await container.read()))
            # BUGFIX: create 'Item_0' so the point read / query / delete
            # below (which all target 'Item_0') actually find the document
            # (the original created 'Item_879').
            await container.create_item(get_test_item(0))
            print("Point read result: " + str(await container.read_item(item='Item_0', partition_key='Item_0')))
            query_results = [item async for item in
                             container.query_items(query='select * from c', partition_key='Item_0')]
            assert len(query_results) == 1
            print("Query result: " + str(query_results[0]))
            await container.delete_item(item='Item_0', partition_key='Item_0')
            # Attempting to do management operations will return a 403 Forbidden exception.
            try:
                await aad_client.delete_database(DATABASE_ID)
            except exceptions.CosmosHttpResponseError as e:
                assert e.status_code == 403
                print("403 error assertion success")
    # To clean up the sample, we use a master key client again to get access to deleting containers/ databases.
    await delete_sample_resources()
    print("end of sample")
if __name__ == "__main__":
    # asyncio.run() creates, runs and closes the event loop for us; the
    # get_event_loop()/run_until_complete pattern is legacy and deprecated
    # for this use in modern Python.
    asyncio.run(run_sample())
| [
"noreply@github.com"
] | gaoyp830.noreply@github.com |
4c9919c44d46a3e676ed56ba016ce0d743f80c2d | bc87bfe38516877dcd2113281a39e2b3fad00229 | /site/05.level2_lane_detection/to_inverse_perspective_mapping.py | f6718365efd0f6fe180b4b52b76e17d15a2b50d2 | [
"Apache-2.0"
] | permissive | FaBoPlatform/RobotCarAI | 7e8f9cf017e5380c1825f7397ee3de933efbc3f2 | c89d3330a2beda0f253733d3252b2b035b153b6b | refs/heads/master | 2021-07-05T06:49:33.414606 | 2020-07-07T05:04:31 | 2020-07-07T05:04:31 | 91,663,056 | 10 | 3 | Apache-2.0 | 2019-04-02T06:54:24 | 2017-05-18T07:34:19 | Python | UTF-8 | Python | false | false | 2,076 | py | # coding: utf-8
# Inverse Perspective Mappingを確認する
#%matplotlib inline
import cv2
from matplotlib import pyplot as plt
import numpy as np
import time
import os
import sys
import math
from lib.functions import *
def main():
FILE_DIR = './test_images'
FILENAME = "frame_1"
OUTPUT_DIR ='./output'
mkdir(OUTPUT_DIR)
print("OpenCV Version : %s " % cv2.__version__)
try:
IMAGE_FORMAT = 1
cv_bgr = cv2.imread(os.path.join(FILE_DIR, FILENAME)+".jpg", IMAGE_FORMAT)
########################################
# Inverse Perspective Mapping Coordinates
########################################
# robocar camera demo_lane
ipm_vertices = calc_ipm_vertices(cv_bgr,
top_width_rate=0.80,top_height_position=0.65,
bottom_width_rate=2.0,bottom_height_position=1)
########################################
# IPM座標を確認する
########################################
cv_bgr_ipm_before_preview = draw_vertices(cv_bgr,ipm_vertices)
plt.title('Before IPM')
plt.imshow(to_rgb(cv_bgr_ipm_before_preview))
plt.show()
cv_bgr_ipm_after_preview = to_ipm(cv_bgr_ipm_before_preview,ipm_vertices)
plt.title('After IPM')
plt.imshow(to_rgb(cv_bgr_ipm_after_preview))
plt.show()
cv2.imwrite(OUTPUT_DIR+"/result_"+FILENAME+"_before_ipm.jpg",cv_bgr_ipm_before_preview)
cv2.imwrite(OUTPUT_DIR+"/result_"+FILENAME+"_after_ipm.jpg",cv_bgr_ipm_after_preview)
########################################
# Inverse Perspective Mapping
########################################
cv_bgr = to_ipm(cv_bgr,ipm_vertices)
plt.title('IPM')
plt.imshow(to_rgb(cv_bgr))
plt.show()
cv2.imwrite(OUTPUT_DIR+"/result_"+FILENAME+"_ipm.jpg",cv_bgr)
except:
import traceback
traceback.print_exc()
finally:
pass
return
if __name__ == '__main__':
main()
| [
"takanashi@gclue.jp"
] | takanashi@gclue.jp |
ac6f275a64a93d3e16e4f17b0470b886fc2e22e6 | a8d68074db5c2b2697650ed0281979d3e00cf5a8 | /Nyspider/www.tjcn.org/patent.py | 6e44e6e48df999043441310950c7f0bf89893d78 | [] | no_license | 15807857476/bogdata-2 | 9595609ea2ae5ae0a48c511f911df2498456467e | 1934cdfa234b77ca91e349b84688db113ff39e8c | refs/heads/master | 2023-05-26T19:10:18.439269 | 2019-05-24T02:50:41 | 2019-05-24T02:50:41 | 188,327,526 | 3 | 1 | null | 2023-05-22T21:37:27 | 2019-05-24T00:53:28 | Python | UTF-8 | Python | false | false | 3,575 | py | import requests
from bs4 import BeautifulSoup
import time
import openpyxl
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-US,en;q=0.5",
"Connection": "keep-alive",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0"}
def get_citys(url):
    """Scrape the yearly index page and return [name, href] pairs for every
    city link found in the second cell of each table row."""
    # The server mislabels the encoding: re-encode the latin-1 text and
    # decode it as GBK to recover the Chinese content.
    raw = requests.get(url, headers=headers).text.encode('iso-8859-1').decode('gbk', 'ignore')
    rows = BeautifulSoup(raw, 'lxml').find('div', {'class': 'content'}).find('table', {'class': 'sy'}).find_all('tr')
    citys = []
    for row in rows:
        try:
            anchors = row.find_all('td')[1].find_all('a')
        except:
            # Row without a second cell (or anchors): skip it.
            continue
        for anchor in anchors:
            try:
                citys.append([anchor.get_text(), anchor.get('href')])
            except:
                continue
    return citys
def get_text(url):
    """Fetch an article plus all its pagination pages and return only the
    sentences that mention patents ('专利'), one per line."""
    # The server mislabels the encoding: re-encode the latin-1 text and
    # decode it as GBK to recover the Chinese content.
    html=requests.get(url,headers=headers,timeout=30).text.encode('iso-8859-1').decode('gbk','ignore')
    # Split the article body into sentences on the Chinese full stop.
    text=BeautifulSoup(html,'lxml').find('div',{'class':'viewbox'}).get_text().replace('\r','').replace('\n','').split('。')
    result=''
    page=2
    # Follow the article's pagination: page N lives at <name>_N.html.
    while True:
        page_url=url.replace('.html','_%s.html'%page)
        html=requests.get(page_url,headers=headers,timeout=30).text.encode('iso-8859-1').decode('gbk','ignore')
        # The site serves a "page deleted/renamed/unavailable" notice past
        # the last page -- use it as the stop condition.
        if '您正在搜索的页面可能已经删除、更名或暂时不可用。' in html:
            break
        text+=BeautifulSoup(html,'lxml').find('div',{'class':'viewbox'}).get_text().replace('\r','').replace('\n','').split('。')
        page+=1
    # Keep only the sentences mentioning patents.
    for item in text:
        if '专利' in item:
            result+=item+'\n'
    return result
def get_failed():
    """Retry the failures recorded in ./failed.txt: successes are appended to
    result.txt, entries that fail again go to failed_1.txt.

    Fixes vs. the original: file handles are managed with ``with`` (the input
    file and the append handles were never reliably closed), and the output
    handle no longer rebinds the ``failed`` name of the list being iterated.
    """
    # NOTE(review): the lines were written by this script with str(), so
    # eval() parses trusted local data; never use it on untrusted input.
    with open('./failed.txt','r') as source:
        records=[eval(line) for line in source]
    for item in records:
        try:
            text=get_text('http://www.tjcn.org'+item[-1])
        except Exception:
            # Still failing: push the entry to the next retry file.
            with open('failed_1.txt','a') as failed_log:
                failed_log.write(str(item)+'\n')
            continue
        with open('result.txt','a') as result_file:
            # item is [year, name, url] as written by main(); re-pack as
            # [year, [name, url], text] to match result.txt's record format.
            result_file.write(str([item[0],item[1:],text])+'\n')
        print(item)
def main():
    """Crawl the 2009-2011 yearly bulletin indexes: append each city's
    patent-related text to result.txt, logging failures to failed.txt.

    Fix vs. the original: the append handles are managed with ``with``
    instead of open()/close() pairs that leaked on exceptions.
    """
    years={'2009':'http://www.tjcn.org/tjgbsy/nd/3595.html','2010':'http://www.tjcn.org/tjgbsy/nd/17848.html','2011':'http://www.tjcn.org/tjgbsy/nd/23306.html'}
    for key in years:
        citys=get_citys(years[key])
        for city in citys:
            try:
                text=get_text('http://www.tjcn.org'+city[1])
            except Exception:
                # Record the failure so get_failed() can retry it later.
                with open('failed.txt','a') as failed_log:
                    failed_log.write(str([key]+city)+'\n')
                continue
            with open('result.txt','a') as result_file:
                result_file.write(str([key,city,text])+'\n')
            print(city,key)
def write_to_excel():
    """Pivot the records in result.txt into result.xlsx: one row per city,
    one column per year (2009-2015), empty cell when a year is missing.

    Fixes vs. the original: the input file is closed via ``with``, the
    try/except dict initialization is replaced with ``setdefault``, and the
    bare excepts are narrowed to the membership test they stood for.
    """
    # NOTE(review): result.txt was written by this script with str(), so
    # eval() parses trusted local data; never use it on untrusted input.
    with open('result.txt','r') as source:
        data=[eval(line) for line in source]
    result={}
    for item in data:
        # item == [year, [city_name, city_url], text]; key by city name.
        result.setdefault(item[1][0], {})[item[0]]=item[-1]
    excel=openpyxl.Workbook(write_only=True)
    sheet=excel.create_sheet()
    keys=['2009','2010','2011','2012','2013','2014','2015']
    for city_name in result:
        line=[]
        for year in keys:
            if year in result[city_name]:
                line.append(result[city_name][year])
            else:
                # Missing year: keep the debug print from the original.
                line.append('')
                print(year,city_name)
        sheet.append([city_name]+line)
    excel.save('result.xlsx')
write_to_excel()
| [
"2397955090@qq.com"
] | 2397955090@qq.com |
98580de3e9f491636334ad285359f2716c1b9e93 | 56b39e46dfd2a54c72728974c13e8ea1f1486f04 | /social_rss/tw.py | 4d2fd31d786618d4ad0cea8841612dbe744530e6 | [] | no_license | vlevit/social-rss | 1f74e4c8306fb6fad75bbd84cb258d4e4321610c | 65df4947c490f13ed6fe3c059883b00625e2ef04 | refs/heads/master | 2020-07-02T21:50:32.367241 | 2018-05-31T19:48:19 | 2018-05-31T19:48:19 | 201,677,586 | 1 | 0 | null | 2019-08-10T20:06:21 | 2019-08-10T20:06:21 | null | UTF-8 | Python | false | false | 6,081 | py | """Twitter module."""
# Note: Twitter HTML-escapes all the data it sends by API.
import calendar
import json
import logging
import os
import pprint
from urllib.parse import urlencode
import dateutil.parser
from twitter import OAuth, Twitter
from social_rss import config
from social_rss.render import block as _block
from social_rss.render import image as _image
from social_rss.render import image_block as _image_block
from social_rss.render import link as _link
from social_rss.request import BaseRequestHandler
LOG = logging.getLogger(__name__)
_TWITTER_URL = "https://twitter.com/"
"""Twitter URL."""
class RequestHandler(BaseRequestHandler):
"""Twitter RSS request handler."""
def initialize(self, credentials=None):
# Handler hook: credentials may be injected at registration time;
# otherwise they are parsed from HTTP basic auth per request.
self.__credentials = credentials
def get(self):
"""Handles the request."""
if self.__credentials is None and not self.__get_credentials():
return
# Offline-debug support: replay a captured timeline from disk instead of
# querying the API; WRITE_OFFLINE_DEBUG captures a live response to disk.
if config.OFFLINE_DEBUG_MODE or config.WRITE_OFFLINE_DEBUG:
debug_path = os.path.join(config.OFFLINE_DEBUG_PATH, "twitter")
if config.OFFLINE_DEBUG_MODE:
with open(debug_path, "rb") as debug_response:
timeline = json.loads(debug_response.read().decode())
else:
# tweet_mode="extended" makes the API return the untruncated
# "full_text" field consumed by _get_feed/_parse_text below.
api = Twitter(auth=OAuth(self.__credentials["access_token_key"], self.__credentials["access_token_secret"],
self.__credentials["consumer_key"], self.__credentials["consumer_secret"]))
timeline = api.statuses.home_timeline(tweet_mode="extended", _timeout=config.API_TIMEOUT)
if config.WRITE_OFFLINE_DEBUG:
with open(debug_path, "wb") as debug_response:
debug_response.write(json.dumps(timeline).encode())
try:
feed = _get_feed(timeline)
except Exception:
# Dump the offending timeline before re-raising for debuggability.
LOG.exception("Failed to process Twitter timeline:%s", pprint.pformat(timeline))
raise
self._write_rss(feed)
def __get_credentials(self):
# Both basic-auth fields pack two tokens separated by "_":
# user = consumer_key_consumer_secret, password = token_key_token_secret.
separator = "_"
credentials = self._get_credentials()
if (
credentials is None or
separator not in credentials[0] or separator not in credentials[1]
):
self._unauthorized(
"Please enter your Twitter credentials: "
"user=$consumer_key{0}$consumer_secret, "
"password=$access_token_key{0}$access_token_secret.".format(separator))
return False
consumer, access_token = credentials
# maxsplit=1: only the first "_" separates key from secret, since the
# secrets themselves may contain underscores.
consumer_key, consumer_secret = consumer.split(separator, 1)
access_token_key, access_token_secret = access_token.split(separator, 1)
self.__credentials = {
"consumer_key": consumer_key,
"consumer_secret": consumer_secret,
"access_token_key": access_token_key,
"access_token_secret": access_token_secret,
}
return True
def _get_feed(timeline):
"""Generates a feed from timeline.

Returns a dict with feed metadata and an "items" list; each item has
id/time/title/url/text keys. A tweet that fails to render degrades to an
"Internal server error" item instead of aborting the whole feed.
"""
items = []
for tweet in timeline:
item = { "id": tweet["id_str"] }
try:
# "created_at" is a textual timestamp; convert to a Unix epoch int.
item["time"] = int(calendar.timegm(dateutil.parser.parse(tweet["created_at"]).utctimetuple()))
# For retweets, render the original tweet but credit the retweeter.
if tweet.get("retweeted_status") is None:
real_tweet = tweet
item["title"] = tweet["user"]["name"]
else:
real_tweet = tweet["retweeted_status"]
item["title"] = "{} (retweeted by {})".format(
real_tweet["user"]["name"], tweet["user"]["name"])
item["url"] = _twitter_user_url(real_tweet["user"]["screen_name"]) + "/status/" + real_tweet["id_str"]
item["text"] = _image_block(
_twitter_user_url(real_tweet["user"]["screen_name"]),
real_tweet["user"]["profile_image_url_https"],
_parse_text(real_tweet["full_text"], real_tweet["entities"]))
except Exception:
# Per-tweet fault isolation: log and emit a placeholder item.
LOG.exception("Failed to process the following tweet:\n%s",
pprint.pformat(tweet))
item.setdefault("title", "Internal server error")
item.setdefault("text", "Internal server error has occurred during processing this tweet")
items.append(item)
return {
"title": "Twitter",
"url": _TWITTER_URL,
"image": _TWITTER_URL + "images/resources/twitter-bird-light-bgs.png",
"description": "Twitter timeline",
"items": items,
}
def _parse_text(text, tweet_entities):
    """Render a tweet's text as HTML, expanding its entities.

    Args:
        text: the tweet's (HTML-escaped) text.
        tweet_entities: the tweet's "entities" mapping, {type: [entity, ...]}.

    Returns:
        The text block followed by inline image blocks for media entities.

    Entities are substituted from right to left so the character indices
    Twitter supplies stay valid while the string is rewritten.
    """
    # Flatten {type: [entity, ...]} into (type, entity) pairs ordered by
    # position in the text, last entity first.
    sorted_entities = []
    for entity_type, entities in tweet_entities.items():
        for entity in entities:
            sorted_entities.append(( entity_type, entity ))
    sorted_entities.sort(
        key=lambda entity_tuple: entity_tuple[1]["indices"][0], reverse=True)

    html = ""
    media_html = ""
    pos = len(text)

    for entity_type, entity in sorted_entities:
        start, end = entity["indices"]

        # Copy the plain text between this entity and the previous one.
        if end < pos:
            html = text[end:pos] + html

        if entity_type == "urls":
            html = _link(entity["expanded_url"], entity["display_url"]) + html
        elif entity_type == "user_mentions":
            html = _link(_twitter_user_url(entity["screen_name"]), entity["name"]) + html
        elif entity_type == "hashtags":
            # Fix: hashtag entity objects carry the tag in "text" — only the
            # tweet object itself has "full_text" (extended mode), so the old
            # entity["full_text"] lookup raised KeyError for every hashtag.
            html = _link(_TWITTER_URL + "search?" + urlencode({ "q": "#" + entity["text"] }), text[start:end]) + html
        elif entity_type == "media":
            html = _link(entity["expanded_url"], entity["display_url"]) + html
            media_html += _block(_link(entity["expanded_url"], _image(entity["media_url_https"])))
        else:
            LOG.error("Unknown tweet entity:\n%s", pprint.pformat(entity))
            html = text[start:end] + html

        pos = start

    # Leading plain text before the first entity.
    if pos:
        html = text[:pos] + html

    return _block(html) + media_html
def _twitter_user_url(screen_name):
    """Return the profile URL for the given Twitter screen name."""
    return "{}{}".format(_TWITTER_URL, screen_name)
| [
"konishchev@gmail.com"
] | konishchev@gmail.com |
import sys

# ABC 101 A: a 4-character string of '+'/'-' operations applied to 0.
# '+' contributes +1, every other character contributes -1.
S = input()
if len(S) != 4:
    sys.exit()
print(sum(1 if ch == '+' else -1 for ch in S))
"jackfrostwillbeking@gmail.com"
] | jackfrostwillbeking@gmail.com |
d91be1f01e5ef10aed2a7827f0ddced4fa214eb1 | 1b46da70028b6fd491df59381884c2acf35b20f4 | /parcalar/WordCounter.py | fb29e174a9188d1f9ad7184972a5cd49838f7009 | [] | no_license | ybgirgin3/beautifulSoup | 28788e07eee7686129da82f529aa00cbf59bf3be | f65ca058c1178d41c1ccb799b9e765c01836bd14 | refs/heads/main | 2023-05-08T02:23:48.767751 | 2021-06-05T07:19:19 | 2021-06-05T07:19:19 | 374,047,699 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # bir url al onun içindeki kelimelerin sayılarını bul
import requests
from pprint import pprint
from collections import Counter
# Fetch a page and tally how often each whitespace-separated token appears
# in its source.
url = "https://ybgirgin3.github.io/"
r = requests.get(url)
# ret = r.content
kaynak_kodu = r.text
#print(ret)
kelime_listesi = kaynak_kodu.split()
kelime_freq = []
# NOTE(review): .count() inside the loop makes this O(n^2);
# Counter(kelime_listesi) would produce the same frequencies in one pass.
for kelime in kelime_listesi:
kelime_freq.append(kelime_listesi.count(kelime))
# Words and their counts.
#ret = str(list(zip(kelime_listesi, kelime_freq)))
ret = list(zip(kelime_listesi, kelime_freq))
# Counts (word, freq) tuples: each word occurs freq times with the same
# freq value, so every tuple's count equals the word's frequency.
pprint(Counter(ret))
| [
"ybgirgin3@gmail.com"
] | ybgirgin3@gmail.com |
def includeme(config):
    """Register the application's static view and URL routes."""
    config.add_static_view('static', 'static', cache_max_age=3600)
    # (name, pattern) pairs, registered in declaration order so Pyramid's
    # first-match route resolution behaves exactly as before.
    routes = [
        ('home', '/'),
        ('login', '/login'),
        ('register', '/register'),
        ('logout', '/logout'),
        ('dashboard', '/dashboard'),
        ('settings', '/settings'),
        ('resetpassword', '/resetpassword'),
        ('showone', '/view/{what}/{id:\d+}'),
        ('showall', '/view/{what}'),
        ('new', '/new/{what}'),
        ('edit', '/edit/{what}/{id:\d+}'),
        ('delete', '/delete/{what}/{id:\d+}'),
        ('about', '/about'),
        # course modification
        ('modifycourse', '/modify/{courseid:\d+}'),
        ('courseaction', '/mod/{courseid:\d+}/*args'),
        # subscriptions
        ('subscradd', '/subscribe/{courseid:\d+}'),
        ('subscrpause', '/sub/{action}/{subscrid:\d+}'),
        ('payment', '/payment/{action}/{courseid:\d+}/{userid:\d+}'),
        ('paymentcheck', '/check'),
        # workouts
        ('workout', '/wkt/{wktid:\d+}'),
    ]
    for name, pattern in routes:
        config.add_route(name, pattern)
| [
"annndrey@trololo.info"
] | annndrey@trololo.info |
995ec97bd13cb19bd5a5629c8ea7678f771f6b95 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_25508.py | 156459636628d599fa8bca3d345d88685047bbaa | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | # how do you read a pytables table into a pandas dataframe
# Open the HDF5 file with PyTables ("tb" is presumably the imported
# `tables` module — confirm against the missing import lines).
a = tb.open_file("FGBS.h5")
table = a.root.quote.z4
# Materialize the table's rows and build a DataFrame ("pd" = pandas).
c = pd.DataFrame.from_records(table.read())
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
69789791fe27d985d5b3a31c5fe6a5288951fcca | 9a91dde7f08d081cdf1a9ed1b7357a49a9fec9d0 | /erp/migrations/0084_auto_20160414_0411.py | c4aad3fb74f3418a16d69e3daf22fb1efcf65992 | [] | no_license | fengxia41103/fashion | a8c812ae0534ed632c1d821b83817d2a830c9fc3 | 6834dcc523dcb1f1f8de0aa3e8779e6cac2ae126 | refs/heads/master | 2022-12-17T00:25:48.600435 | 2019-10-10T18:56:10 | 2019-10-10T18:56:10 | 52,611,714 | 0 | 0 | null | 2022-12-07T23:24:17 | 2016-02-26T14:59:48 | Python | UTF-8 | Python | false | false | 766 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
# Auto-generated Django migration: adds base64 payload columns to the
# erp.Attachment model.
class Migration(migrations.Migration):
dependencies = [
('erp', '0083_auto_20160402_0821'),
]
operations = [
migrations.AddField(
model_name='attachment',
name='file_base64',
# NOTE(review): "Based64" in the help_text is a typo, but it is a
# runtime string in a historical migration, so it is left untouched.
field=models.TextField(default=None, help_text='Based64 encoded file data', null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='attachment',
name='thumbnail_base64',
field=models.TextField(default=None, help_text='Base64 encoded thumbnail data', null=True, blank=True),
preserve_default=True,
),
]
| [
"feng_xia41103@hotmail.com"
] | feng_xia41103@hotmail.com |
002f3acca24ddb7e607e9823a55f8c48a276f86e | 9bc006e71393c5338bd77bbf12c2946dbfaf2d5b | /delete_node_in_a_linked_list.py | 811b0304d54bfae9698560e40708caf6088c4c6b | [] | no_license | aroraakshit/coding_prep | 2d883058ef81317fba0c101fac3da37cd6ecd9ca | aac41ddd2ec5f6e5c0f46659696ed5b67769bde2 | refs/heads/master | 2021-07-09T18:28:26.706576 | 2020-07-08T03:07:26 | 2020-07-08T03:07:26 | 163,704,385 | 8 | 7 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:  # 44ms
    def deleteNode(self, node):
        """Delete *node* from its singly linked list without the list head.

        The node is overwritten with its successor's value and then made to
        bypass the successor, which removes it in O(1).

        :type node: ListNode
        :rtype: void Do not return anything, modify node in-place instead.
        """
        successor = node.next
        node.val, node.next = successor.val, successor.next
"akshit.arora@colorado.edu"
] | akshit.arora@colorado.edu |
dd1e9efcdbcd7e8dc8920c3a218397a4710e5004 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/System/Diagnostics/__init___parts/InstanceData.py | e24830b8c778ede635342a22197cdebfd64feeef | [
"MIT"
class InstanceData(object):
    """
    Holds instance data associated with a performance counter sample.

    InstanceData(instanceName: str, sample: CounterSample)
    """
    # Auto-generated IronPython stub for System.Diagnostics.InstanceData:
    # the bodies are placeholders and the property lambdas only describe
    # the .NET member signatures.

    @staticmethod
    def __new__(self, instanceName, sample):
        """ __new__(cls: type,instanceName: str,sample: CounterSample) """
        pass

    InstanceName = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets the instance name associated with this instance data.

    Get: InstanceName(self: InstanceData) -> str
    """

    RawValue = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets the raw data value associated with the performance counter sample.

    Get: RawValue(self: InstanceData) -> Int64
    """

    Sample = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets the performance counter sample that generated this data.

    Get: Sample(self: InstanceData) -> CounterSample
    """
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
3d0745040c59f150abe1510398a3005cbf6de8d4 | aa54fd5cafc65d18ceac52097237482cec27f674 | /planetary_system_stacker/Test_programs/argparser.py | 34046c6c3c2e4e620b81b650a89b095fbc9e1b82 | [] | no_license | Rolf-Hempel/PlanetarySystemStacker | 84f6934e6748177fb1aca20b54392dee5c3f2e3c | 304952a8ac8e991e111e3fe2dba95a6ca4304b4e | refs/heads/master | 2023-07-20T04:11:06.663774 | 2023-07-17T15:20:15 | 2023-07-17T15:20:15 | 148,365,620 | 228 | 34 | null | 2023-09-01T16:33:05 | 2018-09-11T19:00:13 | Python | UTF-8 | Python | false | false | 7,639 | py | import argparse
import sys
# With no command-line arguments the program is meant to start its
# interactive GUI instead of running a batch job.
print("The command was called with " + str(len(sys.argv)) + " arguments")
if len(sys.argv) <= 1:
print ("Running PSS in interactive mode, starting GUI.")
exit()
def noise_type(x):
    """argparse type checker: noise level, an int in [0, 11]."""
    value = int(x)
    if value < 0 or value > 11:
        raise argparse.ArgumentTypeError("Noise level must be between 0 and 11")
    return value
def stab_size_type(x):
    """argparse type checker: stabilization patch size, percent in [5, 80]."""
    value = int(x)
    if value < 5 or value > 80:
        raise argparse.ArgumentTypeError("Stabilization patch size must be between 5% and 80%")
    return value
def stab_sw_type(x):
    """argparse type checker: stabilization search width, pixels in [5, 150]."""
    value = int(x)
    if value < 5 or value > 150:
        raise argparse.ArgumentTypeError(
            "Stabilization search width must be between 5 and 150 pixels")
    return value
def rf_percent_type(x):
    """argparse type checker: reference-frame percentage, int in [3, 30]."""
    value = int(x)
    if value < 3 or value > 30:
        raise argparse.ArgumentTypeError(
            "Percentage of best frames for reference frame computation must be between 3% and 30%")
    return value
def align_box_width_type(x):
    """argparse type checker: alignment point box width, pixels in [20, 140]."""
    value = int(x)
    if value < 20 or value > 140:
        raise argparse.ArgumentTypeError(
            "Alignment point box width must be between 20 and 140 pixels")
    return value
def align_search_width_type(x):
    """argparse type checker: alignment point search width, pixels in [6, 30]."""
    value = int(x)
    if value < 6 or value > 30:
        raise argparse.ArgumentTypeError(
            "Alignment point search width must be between 6 and 30 pixels")
    return value
def align_min_struct_type(x):
    """argparse type checker: alignment point minimum structure, float in [0.01, 0.30]."""
    value = float(x)
    if value < 0.01 or value > 0.30:
        raise argparse.ArgumentTypeError(
            "Alignment point minimum structure must be between 0.01 and 0.30")
    return value
def align_min_bright_type(x):
    """argparse type checker: alignment point minimum brightness, int in [2, 50]."""
    value = int(x)
    if value < 2 or value > 50:
        raise argparse.ArgumentTypeError(
            "Alignment point minimum brightness must be between 2 and 50")
    return value
def stack_percent_type(x):
    """argparse type checker: percentage of frames to stack, int in [1, 100]."""
    value = int(x)
    if value < 1 or value > 100:
        raise argparse.ArgumentTypeError(
            "Percentage of best frames to be stacked must be between 1 and 100")
    return value
def stack_number_type(x):
    """argparse type checker: number of frames to stack, int >= 1."""
    value = int(x)
    if value < 1:
        raise argparse.ArgumentTypeError(
            "Number of best frames to be stacked must be greater or equal 1")
    return value
def normalize_bco_type(x):
    """argparse type checker: normalization black cut-off, int in [0, 40]."""
    value = int(x)
    if value < 0 or value > 40:
        raise argparse.ArgumentTypeError(
            "Normalization black cut-off must be between 0 and 40")
    return value
# Command-line interface for a PlanetarySystemStacker batch run.
parser = argparse.ArgumentParser()
parser.add_argument("job_input", nargs='+', help="input video files or still image folders")

# General job control / output naming options.
parser.add_argument("-p", "--protocol", action="store_true",
                    help="Store protocol with results")
parser.add_argument("--protocol_detail", type=int, choices=[0, 1, 2], default=1,
                    help="Protocol detail level")
parser.add_argument("-b", "--buffering_level", type=int, choices=[0, 1, 2, 3, 4], default=2,
                    help="Buffering level")
parser.add_argument("--out_format", choices=["png", "tiff", "fits"], default="png",
                    help="Image format for output")
parser.add_argument("--name_add_f", action="store_true",
                    help="Add number of stacked frames to output file name")
parser.add_argument("--name_add_p", action="store_true",
                    help="Add percentage of stacked frames to output file name")
parser.add_argument("--name_add_apb", action="store_true",
                    help="Add alignment point box size (pixels) to output file name")
parser.add_argument("--name_add_apn", action="store_true",
                    help="Add number of alignment points to output file name")

# Frame preprocessing and stabilization.
# Fix: the original choices list contained "RGB" twice; argparse choices
# should be unique.
parser.add_argument("--debayering", choices=["Auto detect color", "Grayscale", "RGB", "BGR",
                                             "Force Bayer RGGB", "Force Bayer GRBG",
                                             "Force Bayer GBRG", "Force Bayer BGGR"],
                    default="Auto detect color", help="Debayering option")
parser.add_argument("--noise", type=noise_type, default=7, help="Noise level (add Gaussian blur)")
parser.add_argument("-m", "--stab_mode", choices=["Surface", "Planet"], default="Surface",
                    help="Frame stabilization mode")
parser.add_argument("--stab_size", type=stab_size_type, default=33,
                    help="Stabilization patch size (% of frame)")
parser.add_argument("--stab_sw", type=stab_sw_type, default=34,
                    help="Stabilization search width (pixels)")
parser.add_argument("--rf_percent", type=rf_percent_type, default=5,
                    help="Percentage of best frames for reference frame computation")
parser.add_argument("-d", "--dark", help="Image file for dark frame correction")
parser.add_argument("-f", "--flat", help="Image file for flat frame correction")

# Alignment point parameters.
parser.add_argument("-a", "--align_box_width", type=align_box_width_type, default=48,
                    help="Alignment point box width (pixels)")
parser.add_argument("-w", "--align_search_width", type=align_search_width_type, default=14,
                    help="Alignment point search width (pixels)")
parser.add_argument("--align_min_struct", type=align_min_struct_type, default=0.04,
                    help="Alignment point minimum structure")
parser.add_argument("--align_min_bright", type=align_min_bright_type, default=10,
                    help="Alignment point minimum brightness")

# Stacking parameters.
parser.add_argument("-s", "--stack_percent", type=stack_percent_type, default=10,
                    help="Percentage of best frames to be stacked")
parser.add_argument("--stack_number", type=stack_number_type,
                    help="Number of best frames to be stacked")
parser.add_argument("-n", "--normalize_bright", action="store_true",
                    help="Normalize frame brightness")
parser.add_argument("--normalize_bco", type=normalize_bco_type, default=15,
                    help="Normalization black cut-off")

args = parser.parse_args()
# Echo the parsed configuration so the chosen parameters are visible in the
# console log of a batch run.
print(str(args.job_input))
print("Store protocol with results: " + str(args.protocol))
print("Protocol detail level: " + str(args.protocol_detail))
print("Buffering level: " + str(args.buffering_level))
print("Image format for output: " + args.out_format)
print("Add number of stacked frames to output file name: " + str(args.name_add_f))
print("Add percentage of stacked frames to output file name: " + str(args.name_add_p))
print("Add alignment point box size (pixels) to output file name: " + str(args.name_add_apb))
print("Add number of alignment points to output file name: " + str(args.name_add_apn))
print("")
print("Debayering option: " + args.debayering)
print("Noise level: " + str(args.noise))
print("Frame stabilization mode: " + args.stab_mode)
print("Stabilization patch size (% of frame): " + str(args.stab_size))
print("Stabilization search width (pixels): " + str(args.stab_sw))
print("Percentage of best frames for reference frame computation: " + str(args.rf_percent))
# Optional correction frames are only reported when given.
if args.dark:
print("Image file for dark frame correction: " + args.dark)
if args.flat:
print("Image file for flat frame correction: " + args.flat)
print("")
print("Alignment point box width (pixels): " + str(args.align_box_width))
print("Alignment point search width (pixels): " + str(args.align_search_width))
print("Alignment point minimum structure: " + str(args.align_min_struct))
print("Alignment point minimum brightness: " + str(args.align_min_bright))
print("")
print("Percentage of best frames to be stacked: " + str(args.stack_percent))
print("Number of best frames to be stacked: " + str(args.stack_number))
print("Normalize frame brightness: " + str(args.normalize_bright))
print("Normalization black cut-off: " + str(args.normalize_bco)) | [
"rolf6419@gmx.de"
] | rolf6419@gmx.de |
1c8ab1dfbea664ba773fc25381cb7b6812ce63c6 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_208/ch169_2020_06_22_16_29_40_113842.py | e3756cd075be6633bac009e64f5161497ba54e75 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | def login_disponivel (login,lista):
if login not in lista:
return login
else:
for login1 in range(len(lista)):
i = 1
while login in lista:
login = login + str(i)
if login in lista:
login = login [:-1]
i+=1
return login
lista1 = []
# Read login requests until the user types "fim"; each request is resolved
# to an available login and the collected list is printed at the end.
while True:
x = str(input("Digite o login: "))
if x != "fim":
login_ = login_disponivel(x,lista1)
# NOTE(review): the raw request x is stored, not the assigned login_;
# repeated identical requests therefore keep resolving to the same
# variant — confirm whether lista1.append(login_) was intended.
lista1.append(x)
if x == "fim":
print(lista1)
break
| [
"you@example.com"
] | you@example.com |
4b69f6d838ad7f89786e05b2358d7efdeee75012 | 81b20a9c51779c21b779ac0b1c5bf669359521ef | /py_object_detection/tf_api/object_detection/builders/losses_builder.py | 6fea798572ee953d00fab78f2b82f3e9215c3c72 | [] | no_license | thekindler/py-object-detection | bae1401f025458605c9244f9a763e17a0138d2ec | a8d13c496bab392ef5c8ad91a20fbfa9af1899bb | refs/heads/master | 2023-06-23T02:42:08.180311 | 2021-07-17T18:40:46 | 2021-07-17T18:40:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,985 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build localization and classification losses from config."""
from py_object_detection.tf_api.object_detection.core import balanced_positive_negative_sampler as sampler
from py_object_detection.tf_api.object_detection.core import losses
from py_object_detection.tf_api.object_detection.protos import losses_pb2
def build(loss_config):
"""Build losses based on the config.

Builds classification, localization losses and optionally a hard example miner
based on the config.

Args:
loss_config: A losses_pb2.Loss object.

Returns:
classification_loss: Classification loss object.
localization_loss: Localization loss object.
classification_weight: Classification loss weight.
localization_weight: Localization loss weight.
hard_example_miner: Hard example miner object.
random_example_sampler: BalancedPositiveNegativeSampler object.

Raises:
ValueError: If hard_example_miner is used with sigmoid_focal_loss.
ValueError: If random_example_sampler is getting non-positive value as
desired positive example fraction.
"""
classification_loss = _build_classification_loss(
loss_config.classification_loss)
localization_loss = _build_localization_loss(
loss_config.localization_loss)
classification_weight = loss_config.classification_weight
localization_weight = loss_config.localization_weight
# Hard example mining is optional and incompatible with focal loss,
# which already down-weights easy examples.
hard_example_miner = None
if loss_config.HasField('hard_example_miner'):
if (loss_config.classification_loss.WhichOneof('classification_loss') ==
'weighted_sigmoid_focal'):
raise ValueError('HardExampleMiner should not be used with sigmoid focal '
'loss')
hard_example_miner = build_hard_example_miner(
loss_config.hard_example_miner,
classification_weight,
localization_weight)
# Optional random subsampling of examples with a fixed positive fraction.
random_example_sampler = None
if loss_config.HasField('random_example_sampler'):
if loss_config.random_example_sampler.positive_sample_fraction <= 0:
raise ValueError('RandomExampleSampler should not use non-positive'
'value as positive sample fraction.')
random_example_sampler = sampler.BalancedPositiveNegativeSampler(
positive_fraction=loss_config.random_example_sampler.
positive_sample_fraction)
return (classification_loss, localization_loss, classification_weight,
localization_weight, hard_example_miner, random_example_sampler)
def build_hard_example_miner(config,
                             classification_weight,
                             localization_weight):
  """Builds hard example miner based on the config.

  Args:
    config: A losses_pb2.HardExampleMiner object.
    classification_weight: Classification loss weight.
    localization_weight: Localization loss weight.

  Returns:
    Hard example miner.
  """
  # Map the proto enum to the string expected by losses.HardExampleMiner;
  # an unrecognized value leaves loss_type as None, as before.
  loss_type_names = {
      losses_pb2.HardExampleMiner.BOTH: 'both',
      losses_pb2.HardExampleMiner.CLASSIFICATION: 'cls',
      losses_pb2.HardExampleMiner.LOCALIZATION: 'loc',
  }
  loss_type = loss_type_names.get(config.loss_type)

  # Proto ints default to 0; treat non-positive values as "unset".
  max_negatives_per_positive = (config.max_negatives_per_positive
                                if config.max_negatives_per_positive > 0 else None)
  num_hard_examples = (config.num_hard_examples
                       if config.num_hard_examples > 0 else None)

  return losses.HardExampleMiner(
      num_hard_examples=num_hard_examples,
      iou_threshold=config.iou_threshold,
      loss_type=loss_type,
      cls_loss_weight=classification_weight,
      loc_loss_weight=localization_weight,
      max_negatives_per_positive=max_negatives_per_positive,
      min_negatives_per_image=config.min_negatives_per_image)
def build_faster_rcnn_classification_loss(loss_config):
  """Builds a classification loss for Faster RCNN based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.ClassificationLoss):
    raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')

  loss_type = loss_config.WhichOneof('classification_loss')
  if loss_type == 'weighted_sigmoid':
    return losses.WeightedSigmoidClassificationLoss()

  # Both softmax variants take the same logit_scale argument; the oneof
  # field name doubles as the attribute name on the config proto.
  softmax_losses = {
      'weighted_softmax': losses.WeightedSoftmaxClassificationLoss,
      'weighted_logits_softmax':
          losses.WeightedSoftmaxClassificationAgainstLogitsLoss,
  }
  if loss_type in softmax_losses:
    config = getattr(loss_config, loss_type)
    return softmax_losses[loss_type](logit_scale=config.logit_scale)

  # By default, Faster RCNN second stage classifier uses Softmax loss
  # with anchor-wise outputs.
  config = loss_config.weighted_softmax
  return losses.WeightedSoftmaxClassificationLoss(
      logit_scale=config.logit_scale)
def _build_localization_loss(loss_config):
  """Builds a localization loss based on the loss config.

  Args:
    loss_config: A losses_pb2.LocalizationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.LocalizationLoss):
    raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.')

  loss_type = loss_config.WhichOneof('localization_loss')
  # Lazily-constructed factories keyed by the oneof field that is set.
  factories = {
      'weighted_l2': lambda: losses.WeightedL2LocalizationLoss(),
      'weighted_smooth_l1': lambda: losses.WeightedSmoothL1LocalizationLoss(
          loss_config.weighted_smooth_l1.delta),
      'weighted_iou': lambda: losses.WeightedIOULocalizationLoss(),
  }
  if loss_type not in factories:
    raise ValueError('Empty loss config.')
  return factories[loss_type]()
def _build_classification_loss(loss_config):
  """Builds a classification loss based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.ClassificationLoss):
    raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')

  loss_type = loss_config.WhichOneof('classification_loss')

  if loss_type == 'weighted_sigmoid':
    return losses.WeightedSigmoidClassificationLoss()

  if loss_type == 'weighted_sigmoid_focal':
    focal_config = loss_config.weighted_sigmoid_focal
    # alpha is an optional proto field; only forward it when explicitly set.
    focal_alpha = focal_config.alpha if focal_config.HasField('alpha') else None
    return losses.SigmoidFocalClassificationLoss(
        gamma=focal_config.gamma,
        alpha=focal_alpha)

  if loss_type == 'weighted_softmax':
    return losses.WeightedSoftmaxClassificationLoss(
        logit_scale=loss_config.weighted_softmax.logit_scale)

  if loss_type == 'weighted_logits_softmax':
    return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
        logit_scale=loss_config.weighted_logits_softmax.logit_scale)

  if loss_type == 'bootstrapped_sigmoid':
    bootstrap_config = loss_config.bootstrapped_sigmoid
    bootstrap_type = 'hard' if bootstrap_config.hard_bootstrap else 'soft'
    return losses.BootstrappedSigmoidClassificationLoss(
        alpha=bootstrap_config.alpha,
        bootstrap_type=bootstrap_type)

  raise ValueError('Empty loss config.')
| [
"uniquetrij@gmail.com"
] | uniquetrij@gmail.com |
87d375bbe50d0f9268ad0bf0297e41f7421d3d79 | 15581a76b36eab6062e71d4e5641cdfaf768b697 | /LeetCode_30days_challenge/2021/June/Range Sum Query - Mutable.py | 706b6b702cad1c3dea2e2ba9c84f32a5268dccbd | [] | no_license | MarianDanaila/Competitive-Programming | dd61298cc02ca3556ebc3394e8d635b57f58b4d2 | 3c5a662e931a5aa1934fba74b249bce65a5d75e2 | refs/heads/master | 2023-05-25T20:03:18.468713 | 2023-05-16T21:45:08 | 2023-05-16T21:45:08 | 254,296,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | from math import sqrt, ceil
from typing import List
class NumArray:
    """Range Sum Query - Mutable via sqrt decomposition.

    The array is split into chunks of size ceil(n / sqrt(n)) (== ceil(sqrt(n)))
    with one precomputed partial sum per chunk, giving O(sqrt(n)) updates
    and range queries.
    """

    def __init__(self, nums: List[int]):
        self.nums = nums
        count = len(self.nums)
        # Chunk width; since chunk**2 >= count, at most `chunk` chunks exist,
        # so a partial-sum slot per possible chunk index suffices.
        self.chunk = ceil(count / sqrt(count))
        self.sums = [0] * self.chunk
        for index, value in enumerate(self.nums):
            self.sums[index // self.chunk] += value

    def update(self, index: int, val: int) -> None:
        # Adjust the affected chunk's partial sum by the delta, then store.
        self.sums[index // self.chunk] += val - self.nums[index]
        self.nums[index] = val

    def sumRange(self, left: int, right: int) -> int:
        first, last = left // self.chunk, right // self.chunk
        if first == last:
            # Both endpoints in one chunk: sum the slice directly.
            return sum(self.nums[left:right + 1])
        # Partial head chunk + whole middle chunks + partial tail chunk.
        total = sum(self.nums[left:(first + 1) * self.chunk])
        total += sum(self.sums[first + 1:last])
        total += sum(self.nums[last * self.chunk:right + 1])
        return total


# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# obj.update(index,val)
# param_2 = obj.sumRange(left,right)
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# obj.update(index,val)
# param_2 = obj.sumRange(left,right)
| [
"mariandanaila01@gmail.com"
] | mariandanaila01@gmail.com |
e24ab3fbb279a18b2d432b73ba6fdaf38bdadad2 | 4142b8c513d87361da196631f7edd82f11465abb | /python/round710/1506A.py | da3092ef9ff672dc4336fef5ebbc1e8262b7e825 | [] | no_license | npkhanhh/codeforces | b52b66780426682ea1a3d72c66aedbe6dc71d7fe | 107acd623b0e99ef0a635dfce3e87041347e36df | refs/heads/master | 2022-02-08T17:01:01.731524 | 2022-02-07T10:29:52 | 2022-02-07T10:29:52 | 228,027,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from sys import stdin
# Codeforces 1506A: convert position x (1-based) between the column-major
# and row-major numberings of an n-by-m table.
for _ in range(int(stdin.readline())):
n, m, x = list(map(int, stdin.readline().split()))
# Split x-1 into a column index c and row index r (both 0-based),
# reading the table down columns of height n.
c, r = divmod(x-1, n)
# Re-linearize the same cell along rows of width m, back to 1-based.
res = r*m + c + 1
print(res)
| [
"npkhanh93@gmail.com"
] | npkhanh93@gmail.com |
8079f22284b0b8cc9f38a51756f95e1d311cbfdf | a6f1389c97705724e6b5cc40b0abd56c885b0335 | /max_heap.py | 413690f975be30b09a839f1f85a748181bfc5814 | [] | no_license | iosmichael/interview-prep | c80854a05d5b6d2cfba4321bcea9b68c6649790c | 1635890b6f3ed6c132b3bf6e87f752d85d3280e1 | refs/heads/master | 2020-04-01T18:36:21.316874 | 2018-11-05T23:44:18 | 2018-11-05T23:44:18 | 153,502,988 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,063 | py | '''
Coding challenges: build a max heap tree in 10 mins
Michael Liu
'''
class Heap(object):
    """Array-backed max-heap (1-based; index 0 holds a sentinel).

    Fixes the original implementation: insert() swapped the new element
    straight to the root and then ran top-down sift-downs over the whole
    tree, which does not restore the heap property (Heap([5, 4, 3, 2, 1])
    ended with root 4 above child 5), and get_max() removed index 1 with
    list.pop(1), shifting every element and breaking parent/child links.
    """

    def __init__(self, data):
        # data[0] is an unused sentinel so children of node i are 2i and 2i+1.
        self.data = [0]
        self.nums = 0
        for value in data:
            self.insert(value)

    def insert(self, value):
        """Append value at the end and sift it up to its proper place."""
        self.data.append(value)
        self.nums += 1
        child = len(self.data) - 1
        while child > 1 and self.data[child] > self.data[child // 2]:
            self.data[child], self.data[child // 2] = \
                self.data[child // 2], self.data[child]
            child //= 2

    def heapify(self, start):
        """Sift the element at `start` down until both children are smaller."""
        size = len(self.data)
        node = start
        while True:
            largest = node
            left, right = self.left_child(node), self.right_child(node)
            if left < size and self.data[left] > self.data[largest]:
                largest = left
            if right < size and self.data[right] > self.data[largest]:
                largest = right
            if largest == node:
                return
            self.data[node], self.data[largest] = self.data[largest], self.data[node]
            node = largest

    def heapify_recursive(self, i):
        """Restore the heap property for the whole subtree rooted at i.

        Children are fixed before their parent (bottom-up build-heap), so a
        single sift-down per node is sufficient.
        """
        if self.left_child(i) < len(self.data):
            self.heapify_recursive(self.left_child(i))
        if self.right_child(i) < len(self.data):
            self.heapify_recursive(self.right_child(i))
        self.heapify(i)

    def print_heap(self, start):
        """Breadth-first dump; each level adds one more '-' to the prefix."""
        queue = [("", start)]
        while queue:
            prefix, node = queue.pop(0)
            print(prefix, self.data[node])
            if self.left_child(node) < len(self.data):
                queue.append((prefix + "-", self.left_child(node)))
            if self.right_child(node) < len(self.data):
                queue.append((prefix + "-", self.right_child(node)))

    def left_child(self, n):
        return 2 * n

    def right_child(self, n):
        return 2 * n + 1

    def get_max(self):
        """Remove and return the maximum; raises IndexError when empty."""
        max_num = self.data[1]
        last = self.data.pop()
        self.nums -= 1
        if len(self.data) > 1:
            # Move the last leaf to the root and sift it down.
            self.data[1] = last
            self.heapify(1)
        return max_num
def main():
    """Demo: build a heap from sample values, show it, then drain it in order."""
    sample = [1, 2, 5, 2, 3]
    heap = Heap(sample)
    heap.print_heap(1)
    total = heap.nums
    for _ in range(total):
        print(heap.get_max())
if __name__ == '__main__':
main() | [
"michaelliu@iresearch.com.cn"
] | michaelliu@iresearch.com.cn |
9a97bca1fc8062bacb8ad8f8f52f2f5c5cee640e | 181247a52877d8577b3d2bf96ee9b2683c0a2edc | /client-python/producer/hello_world_producer_synchronously_with_call_back.py | cd550a4f958abc82ca358abc9c5a1d29300fd92e | [
"Apache-2.0"
] | permissive | savadev/CCD-Apache-Kafka-Certification-Examination-Notes | 65f9470510243a9ba328f0f3d8a2d7b2903ff1d9 | 2e35fdd0d289bd91ed618c7096d5fad7becfb928 | refs/heads/master | 2023-03-06T15:15:06.721867 | 2021-02-18T17:02:25 | 2021-02-18T17:02:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | import socket
from confluent_kafka import Producer
def call_back(err, msg):
    """Delivery-report callback invoked by Producer.poll()/flush().

    `err` is None on success; `msg` is the delivered (or failed) message.
    """
    if err is not None:
        print("Failed to deliver message: %s: %s" % (str(msg), str(err)))
    else:
        print("Message produced: %s" % (str(msg)))
        # Bug fix: topic/offset/timestamp are Message *methods* in
        # confluent_kafka; without the parentheses the original printed
        # bound-method objects instead of the actual values.
        print("Topic name : ", msg.topic())
        print("offset : ", msg.offset())
        print("timestamp : ", msg.timestamp())
# Minimal producer config: the 3-broker bootstrap list plus a client id
# (this host's name) so brokers and logs can identify the producer.
conf = {'bootstrap.servers': "broker-1:19092,broker-2:29092,broker-3:39092",
        'client.id': socket.gethostname()}
producer = Producer(conf)
# Asynchronous publish; the delivery report arrives via call_back above.
producer.produce(topic='hello-world-topic', key=None, value="Hello World from Python", callback=call_back)
producer.flush() # can be used to make writes synchronous.
| [
"balaji.chopparapu@gmail.com"
] | balaji.chopparapu@gmail.com |
8aa95cbddef9faac1d7bf8cbdf032a87e54da017 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /u9mxp7LLxogAjAGDN_0.py | 28bbdb4bb9a2a71820a11d6e89ebb428d9d40700 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | """
A number's **factor length** is simply its total number of factors.
For instance:
3: 1, 3
# 3's factor length = 2
8: 1, 2, 4, 8
# 8's factor length = 4
36 : 1, 2, 3, 4, 6, 9, 12, 18, 36
# 36's factor length = 9
Create a function that sorts a list by **factor length** in **descending
order**. If multiple numbers have the same factor length, sort these numbers
in **descending order** , with the largest first.
In the example below, since 13 and 7 both have only 2 factors, we put 13 ahead
of 7.
factor_sort([9, 7, 13, 12]) ➞ [12, 9, 13, 7]
# 12 : 6, 9: 3, 13: 2, 7: 2
### Examples
factor_sort([1, 2, 31, 4]) ➞ [4, 31, 2, 1]
factor_sort([5, 7, 9]) ➞ [9, 7, 5]
factor_sort([15, 8, 2, 3]) ➞ [15, 8, 3, 2]
### Notes
Descending order: numbers with a higher factor length go before numbers with a
lower factor length.
"""
def factor_sort(nums):
    """Sort by factor count, descending; ties broken by the larger number first."""
    return sorted(nums, key=lambda n: (factors(n), n), reverse=True)

def factors(num):
    """Count the divisors of `num` (its "factor length")."""
    return sum(1 for d in range(1, num + 1) if num % d == 0)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
34fe0b333f0f6e513fed9b4ab3192ffcbd9fcbb6 | c97b9ae1bf06757ba61f90905e4d9b9dd6498700 | /venv/Lib/site-packages/tensorflow/python/keras/preprocessing/__init__.py | 7f6e4c8ba1fbc5e8264cf8618a5d1dc362f84d8d | [] | no_license | Rahulk1p/image-processor | f7ceee2e3f66d10b2889b937cdfd66a118df8b5d | 385f172f7444bdbf361901108552a54979318a2d | refs/heads/main | 2023-03-27T10:09:46.080935 | 2021-03-16T13:04:02 | 2021-03-16T13:04:02 | 348,115,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e1ff166defb534fc907c2cf431350e914660a6c2560baffc884c51b97960f923
size 1575
| [
"rksc.k1p@gmail.com"
] | rksc.k1p@gmail.com |
0b86ad91ba46470f01a77a8261beb4206834c864 | 2acf76ecc3ad14f5e0390df5db31ef17aecd91cb | /rookbook/server.py | 0b0b1e320c2757bd07b2e2b819fe20de52671726 | [] | no_license | zielmicha/rookbook | 420b2717a5d3d8b7968c41ca8296e37e3ce6b5ec | a54d693a83db88d0f7aabfe58ee01513e531bdb1 | refs/heads/master | 2020-12-22T15:40:13.499835 | 2020-02-15T18:03:17 | 2020-02-15T18:03:17 | 220,621,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,224 | py | import asyncio, http, os, io, glob, websockets, json
from lxml import etree
from . import book
def get_static(content_type, path):
    """Read `path` from disk and return an HTTP (status, headers, body) triple."""
    with open(path, 'rb') as handle:
        body = handle.read()
    return http.HTTPStatus.OK, [('content-type', content_type)], body
# URL path -> (content type, repo-relative file path) for the static assets
# this dev server knows how to serve.
static_file_map = {
    '/static/index.js': ('text/javascript', 'client/dist/index.js'),
    '/static/index.js.map': ('text/javascript', 'client/dist/index.js.map'),
    '/static/react.js': ('text/javascript', 'client/node_modules/react/umd/react.development.js'),
    '/static/react-dom.js': ('text/javascript', 'client/node_modules/react-dom/umd/react-dom.development.js'),
    '/static/style.css': ('text/css', 'client/style.css'),
}
class WebServer:
    """Serves the static client bundle over HTTP and upgrades /websocket connections."""
    def __init__(self, handler):
        self.handler = handler
    async def process_request(self, path, request_headers):
        # Drop any query string; routing only looks at the bare path.
        path = path.split('?')[0]
        base_dir = os.path.dirname(__file__) + '/..'
        if path == "/":
            return get_static('text/html', os.path.join(base_dir, 'client/index.html'))
        if path in static_file_map:
            mime_type, local_path = static_file_map[path]
            return get_static(mime_type, os.path.join(base_dir, local_path))
        if path != "/websocket":
            return http.HTTPStatus.NOT_FOUND, [], b'not found'
        # Falling through (returning None) lets websockets run the upgrade handshake.
    async def handle_websocket(self, websocket, path):
        if path == '/websocket':
            await self.handler.run(websocket)
    def main(self, host, port):
        start_server = websockets.serve(
            self.handle_websocket, host, port, process_request=self.process_request,
            origins=['http://%s:%d' % (host, port), 'https://%s:%d' % (host, port)] # type: ignore
        )
        asyncio.get_event_loop().run_until_complete(start_server)
        asyncio.get_event_loop().run_forever()
class Handler:
    """Bridges the Book model to connected websocket clients.

    Keeps the list of open sockets, pushes full document/data snapshots, and
    applies JSON edit messages from clients to the book.
    """
    def __init__(self, book):
        self.book = book
        self.websockets = []
    async def run(self, websocket):
        """Serve one client: initial snapshot, then a message loop until disconnect."""
        await self.send_full_update(websocket)
        self.websockets.append(websocket)
        try:
            async for msg in websocket:
                await self.handle_message(websocket, msg)
        finally:
            # Always unregister, even if the message loop raised.
            self.websockets.remove(websocket)
    async def send_full_update(self, websocket):
        """Send the whole document plus every widget's data/header to one client."""
        document = self.book.get_document_text()
        print('send', document)
        await websocket.send(json.dumps({'type': 'document', 'data': '<!-- gen -->\n' + document}))
        for id, widget in self.book.widgets.items():
            await websocket.send(json.dumps({'type': 'data', 'id': id,
                                             'data': widget.data_json,
                                             'header': widget.header_json}))
    async def full_update(self):
        """Broadcast a full snapshot to every connected client."""
        # TODO: async
        for ws in self.websockets:
            await self.send_full_update(ws)
    async def send_result(self, websocket, req, data):
        # Reply to an RPC-style request, echoing its call_id back as 'id'.
        await websocket.send(json.dumps({
            'type': 'response',
            'id': req['call_id'],
            'data': data
        }))
    async def handle_message(self, websocket, msg_data):
        """Decode one client message and apply the corresponding Book mutation."""
        print('msg', msg_data)
        msg = json.loads(msg_data)
        if msg['type'] == 'set':
            self.book.set(msg['path'], msg['value'])
            await self.full_update()
            await websocket.send(json.dumps({'type': 'set-done', 'epoch': msg['epoch']}))
        elif msg['type'] == 'action':
            self.book.action(msg['path'], msg['value'])
            await self.full_update()
        elif msg['type'] == 'doc-add-widget':
            self.book.doc_add_widget(parent_id=msg['parentId'], element_name=msg['element'])
            await self.full_update()
        elif msg['type'] == 'doc-set-text':
            self.book.doc_set_text(selector=msg['selector'], new_value=msg['new_value'])
            await self.full_update()
        elif msg['type'] == 'doc-delete':
            self.book.doc_delete(selector=msg['selector'])
            await self.full_update()
        elif msg['type'] == 'doc-add':
            # remove_blank_text keeps lxml from storing whitespace-only nodes.
            parser = etree.XMLParser(remove_blank_text=True)
            self.book.doc_add(selector=msg['selector'], element=etree.XML(msg['xml'], parser=parser))
            await self.full_update()
        elif msg['type'] == 'doc-set-attr':
            self.book.doc_set_attr(selector=msg['selector'], attrs=msg['attrs'])
            await self.full_update()
        elif msg['type'] == 'doc-replace-xml':
            parser = etree.XMLParser(remove_blank_text=True)
            try:
                xml = etree.XML(msg['new_xml'], parser=parser)
            except Exception:
                # Parse failures are reported to the client, not raised.
                await self.send_result(websocket, msg, {'error': 'failed to parse XML'})
            else:
                self.book.doc_replace_xml(msg['selector'], xml)
                await self.send_result(websocket, msg, {'ok': True})
                await self.full_update()
        else:
            print('unknown message', msg)
if __name__ == '__main__':
    import sys
    # NOTE(review): this rebinds the module name `book` to a Book instance;
    # it works only because the module is not referenced again below.
    book = book.Book(document_path=sys.argv[1], data_path=sys.argv[2])
    book.refresh()
    from . import common
    # Embedded IPython shell sharing this module's globals, then serve forever.
    common.start_asyncio_ipython(local_ns=globals())
    WebServer(Handler(book)).main('localhost', 5000)
| [
"michal@zielinscy.org.pl"
] | michal@zielinscy.org.pl |
b6742a605d30a14b56ddc7e9c2752801f04823cb | 078de01daa97d413ec91629fc553fd637404d4d1 | /manage.py | d12027a6383577ddef48b217627464e888080be8 | [] | no_license | crowdbotics-apps/test-user-1975 | c6e91cf4b0940630a5906df6e5085a789be5493e | bff22c4c5c825c158f166303aa34d421a3572f97 | refs/heads/master | 2022-12-11T05:57:00.701120 | 2019-04-05T17:17:46 | 2019-04-05T17:17:46 | 179,724,618 | 0 | 0 | null | 2022-12-08T05:00:06 | 2019-04-05T17:17:14 | Python | UTF-8 | Python | false | false | 812 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before management code imports them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_user_1975.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch to the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
e9e91d9998570c5d120492877e7906cd0fd27a61 | 3c52eb24728edf054842c162153c92d2a77dac4a | /Q1/boy.py | aa5090666e6314fc8cb060dc6385b28d393466f7 | [] | no_license | PPL-IIITA/ppl-assignment-shyam000 | 53afeacc94905e46713b710a9d162c364a08f896 | 8512c533ff3a0e43eb4913494238da10590101fb | refs/heads/master | 2021-01-21T05:29:06.520958 | 2017-02-27T06:18:18 | 2017-02-27T06:18:18 | 83,195,331 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | class Boy:
def __init__ (self,name,attractiveness,reqAttractiveness,Type,intelligence,budget):
self.name = name
self.attractiveness = attractiveness
self.reqAttractiveness = reqAttractiveness
self.Type = Type
self.intelligence = intelligence
self.status = 'single'
self.budget = budget
self.gfName = ''
self.happiness = 0
self.gfBudget = 0
def set_gf(self,gf):
self.gfName = gf
def changegfBudget(self,gfBudget):
self.gfBudget = gfBudget
def set_happiness(self,happiness) :
self.happiness = happiness
def isEligible(self,maintainanceBudget,attractiveness):
if(self.budget >= maintainanceBudget) and (attractiveness >= self.reqAttractiveness):
return True
else:
return False
| [
"noreply@github.com"
] | PPL-IIITA.noreply@github.com |
cd4ca89e51565e52a8b8011f9f6f3158fa32f12c | 7619aed8a311e2832634379762c373886f4354fb | /trace_floodlight_firewall-StarTopology4-steps200/openflow_replay_config.py | ab27d1ba43e9fb1c43662952cef93ba8dda5abba | [] | no_license | jmiserez/sdnracer-traces | b60f8588277c4dc2dad9fe270c05418c47d229b3 | 8991eee19103c8ebffd6ffe15d88dd8c25e1aad5 | refs/heads/master | 2021-01-21T18:21:32.040221 | 2015-12-15T14:34:46 | 2015-12-15T14:34:46 | 39,391,225 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow.openflow_replayer import OpenFlowReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='java -ea -Dlogback.configurationFile=./src/main/resources/logback-trace.xml -jar ./target/floodlight.jar -cf ./src/main/resources/trace_firewall.properties', label='c1', address='127.0.0.1', cwd='../floodlight')],
topology_class=StarTopology,
topology_params="num_hosts=4",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
ignore_interposition=False,
kill_controllers_on_exit=True)
control_flow = OpenFlowReplayer(simulation_config, "paper/trace_floodlight_firewall-StarTopology4-steps200/events.trace")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'InvariantChecker.check_liveness'
# Bug signature: ""
| [
"jeremie@miserez.org"
] | jeremie@miserez.org |
6d6f18f23f0bfa640e44315af45fd2070e03b6ba | d2e80a7f2d93e9a38f37e70e12ff564986e76ede | /Python-cookbook-2nd/cb2_03/cb2_3_3_sol_1.py | 427c0a4f54818660fbf6defaa71b60562dcd4147 | [] | no_license | mahavivo/Python | ceff3d173948df241b4a1de5249fd1c82637a765 | 42d2ade2d47917ece0759ad83153baba1119cfa1 | refs/heads/master | 2020-05-21T10:01:31.076383 | 2018-02-04T13:35:07 | 2018-02-04T13:35:07 | 54,322,949 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | from dateutil import rrule
import datetime
def weeks_between(start_date, end_date):
    """Count the weekly recurrences from start_date up to end_date (inclusive)."""
    recurrences = rrule.rrule(rrule.WEEKLY, dtstart=start_date, until=end_date)
    return recurrences.count()
| [
"mahavivo@126.com"
] | mahavivo@126.com |
e249e15b4e945f227280d87a8fd0c22f5f5404fa | 402ac64e93d36c2e4ea88f32f50d0b47b84dc16f | /TwitterClone/tweets/admin.py | 5e5f25a495a07f134b9f7f0158356e3431f4e728 | [] | no_license | akashgiricse/TwitterClone | e430e8eb7bfe7e60f3be5c2cf56138457bcb5028 | 8fb3edb36aa54aa81737284d0379112f457ce08f | refs/heads/master | 2021-04-18T20:44:00.335872 | 2018-05-16T18:00:00 | 2018-05-16T18:00:00 | 126,720,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | from django.contrib import admin
# Register your models here.
from .forms import TweetModelForm
from .models import Tweet
# admin.site.register(Tweet)
class TweetModelAdmin(admin.ModelAdmin):
    """Admin configuration for Tweet, editing through the custom TweetModelForm.

    Fix: ``ModelAdmin`` ignores an inner ``class Meta`` (that idiom belongs to
    ``ModelForm``); the model is bound by the register() call below, and the
    imported-but-previously-unused TweetModelForm is attached via ``form``.
    """
    form = TweetModelForm

admin.site.register(Tweet, TweetModelAdmin)
| [
"akashgiricse@gmail.com"
] | akashgiricse@gmail.com |
5b7d4eb4a9269304bd4ad33203096ae1001fdfe7 | 24a291e5eb298b7c2b4f1105d789ac488457b59c | /Python_Pandas_Basics/Pandas07_11_setIndex01_김민교.py | 90f6885de1a89b2c7dc304d78b8b0afa18265359 | [] | no_license | gmrdns03/Python-Introductory-Course_Minkyo | da3afff502ed44f178d5b3885fbb1b01249ad1de | ef0d4e16aee3dba6a4a10c422ef68b1465745833 | refs/heads/main | 2023-05-29T16:08:31.814542 | 2021-06-23T13:32:14 | 2021-06-23T13:32:14 | 379,300,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py |
# coding: utf-8
# '''index / set_index / reindex / reset_index'''
# In[2]:
import pandas as pd

# Exam scores for three students; one column per subject, keyed by name.
exam_data = {'이름': ['서준', '우현', '인아'],
             '수학': [90, 80, 70],
             '영어': [98, 89, 95],
             '음악': [85, 95, 100],
             '체육': [100, 90, 90]}
df = pd.DataFrame(exam_data)
print(df, '\n')

# Promote the name column to be the row index.
ndf = df.set_index(['이름'])
print(ndf, '\n')
print('='*30)
# Re-index by a single column (replaces, and drops, the previous index).
ndf2 = ndf.set_index('음악')
print(ndf2, '\n')
print('='*30)
# A two-column MultiIndex.
ndf3 = ndf.set_index(['수학', '음악'])
print(ndf3, '\n')
print('='*30)
ndf4 = df.set_index(['음악', '체육'])
print(ndf4, '\n')
print('='*30)
| [
"noreply@github.com"
] | gmrdns03.noreply@github.com |
e14231b7aba338f8d2f8abff1314626b808ee9d9 | 5c724d6e03e4194680c793718a4f72a58ca66bb1 | /app/migrations/0151_auto_20181026_0352.py | 1a09b05d842c68193239fc5054aad9abce6d0427 | [] | no_license | tigrezhito1/bat | 26002de4540bb4eac2751a31171adc45687f4293 | 0ea6b9b85e130a201c21eb6cbf09bc21988d6443 | refs/heads/master | 2020-05-02T07:13:06.936015 | 2019-03-26T15:04:17 | 2019-03-26T15:04:17 | 177,812,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-10-26 08:52
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration. Note the datetime default below was frozen at
    # generation time (makemigrations evaluated datetime.now() once).
    dependencies = [
        ('app', '0150_auto_20181026_0346'),
    ]
    operations = [
        migrations.AlterField(
            model_name='produccion',
            name='fecha',
            field=models.DateTimeField(default=datetime.datetime(2018, 10, 26, 3, 52, 22, 789940), editable=False, help_text='Fecha de recepci\xf3n de la llamada (No se puede modificar)'),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
5bd58a8d3db1ba70468d22d0b32b71b4eea15847 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/scvmm/azure-mgmt-scvmm/azure/mgmt/scvmm/aio/_scvmm.py | 1d467ccbda4e3a25f2e6b6fd7ba5f5ed89a4ec01 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 6,185 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from msrest import Deserializer, Serializer
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ._configuration import SCVMMConfiguration
from .operations import AvailabilitySetsOperations, CloudsOperations, InventoryItemsOperations, Operations, VirtualMachineTemplatesOperations, VirtualMachinesOperations, VirtualNetworksOperations, VmmServersOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class SCVMM:    # pylint: disable=too-many-instance-attributes  (generated SDK client)
    """The Microsoft.ScVmm Rest API spec.

    :ivar vmm_servers: VmmServersOperations operations
    :vartype vmm_servers: azure.mgmt.scvmm.aio.operations.VmmServersOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.scvmm.aio.operations.Operations
    :ivar clouds: CloudsOperations operations
    :vartype clouds: azure.mgmt.scvmm.aio.operations.CloudsOperations
    :ivar virtual_networks: VirtualNetworksOperations operations
    :vartype virtual_networks: azure.mgmt.scvmm.aio.operations.VirtualNetworksOperations
    :ivar virtual_machines: VirtualMachinesOperations operations
    :vartype virtual_machines: azure.mgmt.scvmm.aio.operations.VirtualMachinesOperations
    :ivar virtual_machine_templates: VirtualMachineTemplatesOperations operations
    :vartype virtual_machine_templates:
     azure.mgmt.scvmm.aio.operations.VirtualMachineTemplatesOperations
    :ivar availability_sets: AvailabilitySetsOperations operations
    :vartype availability_sets: azure.mgmt.scvmm.aio.operations.AvailabilitySetsOperations
    :ivar inventory_items: InventoryItemsOperations operations
    :vartype inventory_items: azure.mgmt.scvmm.aio.operations.InventoryItemsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The Azure subscription ID. This is a GUID-formatted string (e.g.
     00000000-0000-0000-0000-000000000000).
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2020-06-05-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = SCVMMConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Map of model-class name -> class, used by msrest (de)serialization.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # The service validates requests; skip redundant client-side validation.
        self._serialize.client_side_validation = False
        self.vmm_servers = VmmServersOperations(self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.clouds = CloudsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.virtual_networks = VirtualNetworksOperations(self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machines = VirtualMachinesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_templates = VirtualMachineTemplatesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.availability_sets = AvailabilitySetsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.inventory_items = InventoryItemsOperations(self._client, self._config, self._serialize, self._deserialize)


    def _send_request(
        self,
        request: HttpRequest,
        **kwargs: Any
    ) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """

        # Copy first so the caller's request object is not mutated when the
        # relative URL is formatted against the client's base URL.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "SCVMM":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| [
"noreply@github.com"
] | test-repo-billy.noreply@github.com |
6124ae92af04400051ec38844b0db31b8b940b49 | 80fd6511bce02d3809d4d4fbb5d9879c09f9a535 | /ui.py | 27e0ab6b55b271bc9ad37f96f54b62344e44e300 | [] | no_license | BennyKok/uv-align-distribute | bb5aa210d983bd9f97500dda8a8f05131d72ed5d | 1aad31c487612f86bb42bc9d5d399636450b452e | refs/heads/master | 2021-01-14T09:49:57.430336 | 2015-12-07T14:42:22 | 2015-12-07T14:42:22 | 66,824,994 | 0 | 0 | null | 2016-08-29T08:18:37 | 2016-08-29T08:18:37 | null | UTF-8 | Python | false | false | 2,445 | py |
import bpy
##############
# UI
##############
class IMAGE_PT_align_distribute(bpy.types.Panel):
    """UV editor tool-shelf panel exposing the align/distribute operators."""
    bl_label = "Align\Distribute"
    bl_space_type = 'IMAGE_EDITOR'
    bl_region_type = 'TOOLS'
    bl_category = "Tools"
    @classmethod
    def poll(cls, context):
        # Show only while UV-editing, and not during UV sculpt or synced selection.
        sima = context.space_data
        return sima.show_uvedit and \
            not (context.tool_settings.use_uv_sculpt
                 or context.scene.tool_settings.use_uv_select_sync)
    def draw(self, context):
        scn = context.scene
        layout = self.layout
        layout.prop(scn, "relativeItems")
        layout.prop(scn, "selectionAsGroup")
        layout.separator()
        layout.label(text="Align:")
        box = layout.box()
        row = box.row(True)
        row.operator("uv.align_left_margin", "Left")
        row.operator("uv.align_vertical_axis", "VAxis")
        row.operator("uv.align_right_margin", "Right")
        row = box.row(True)
        row.operator("uv.align_top_margin", "Top")
        row.operator("uv.align_horizontal_axis", "HAxis")
        row.operator("uv.align_low_margin", "Low")
        row = layout.row()
        row.operator("uv.align_rotation", "Rotation")
        row.operator("uv.equalize_scale", "Eq. Scale")
        layout.separator()
        # Another Panel??
        layout.label(text="Distribute:")
        box = layout.box()
        row = box.row(True)
        row.operator("uv.distribute_ledges_horizontally", "LEdges")
        row.operator("uv.distribute_center_horizontally",
                     "HCenters")
        row.operator("uv.distribute_redges_horizontally",
                     "RCenters")
        row = box.row(True)
        row.operator("uv.distribute_tedges_vertically", "TEdges")
        row.operator("uv.distribute_center_vertically", "VCenters")
        row.operator("uv.distribute_bedges_vertically", "BEdges")
        row = layout.row(True)
        row.operator("uv.equalize_horizontal_gap", "Eq. HGap")
        row.operator("uv.equalize_vertical_gap", "Eq. VGap")
        #wip
        #row = layout.row(True)
        #row.operator("uv.remove_overlaps", "Remove Overlaps")
        layout.separator()
        # NOTE(review): positional label("Others:") — newer Blender APIs expect
        # label(text=...); verify against the targeted Blender version.
        layout.label("Others:")
        row = layout.row()
        layout.operator("uv.snap_islands")
        row = layout.row()
layout.operator("uv.match_islands") | [
"none@none"
] | none@none |
cf7adeaaa9a4b1b387a721fea95dcb84be70228d | ad1d46b4ec75ef1f00520ff246d0706c6bb7770e | /content/chapters/transform-strings/07.py | b419a5904a6ca3411f46ba81c537f890f9dc5da1 | [] | no_license | roberto-arista/PythonForDesigners | 036f69bae73095b6f49254255fc473a8ab7ee7bb | 1a781ea7c7ee21e9c64771ba3bf5634ad550692c | refs/heads/master | 2022-02-24T15:28:04.167558 | 2021-09-07T10:37:01 | 2021-09-07T10:37:01 | 168,937,263 | 103 | 37 | null | 2022-02-11T02:24:01 | 2019-02-03T11:17:51 | Python | UTF-8 | Python | false | false | 209 | py | # we import the euler constant from the math module
from math import e
# then we print the constant value
print(f'euler: {e}') # euler: 2.718281828459045
# note the different amount of digits after the period
| [
"arista.rob@gmail.com"
] | arista.rob@gmail.com |
026ef60b487bbe9b43329cf88076dd339b18923a | e0df2bc703d0d02423ea68cf0b8c8f8d22d5c163 | /ScientificComputing/ch18/simple_pendulum_period.py | 960c0d2dad9265076e26a94ed236db5197dc8fd7 | [] | no_license | socrates77-sh/learn | a5d459cb9847ba3b1bc4f9284ce35d4207d8aa8b | ae50978023f6b098b168b8cca82fba263af444aa | refs/heads/master | 2022-12-16T16:53:50.231577 | 2019-07-13T13:52:42 | 2019-07-13T13:52:42 | 168,442,963 | 0 | 0 | null | 2022-12-08T05:18:37 | 2019-01-31T01:30:06 | HTML | UTF-8 | Python | false | false | 980 | py | # -*- coding: utf-8 -*-
from math import sin, sqrt
import numpy as np
from scipy.integrate import odeint
from scipy.optimize import fsolve
import pylab as pl
from scipy.special import ellipk
g = 9.8  # gravitational acceleration, m/s^2

def pendulum_equations(w, t, l):
    """ODE right-hand side for a simple pendulum of length l.

    w = (angle, angular velocity); t is unused (autonomous system).
    Returns (d_angle/dt, d_velocity/dt).
    """
    theta, omega = w
    return omega, -g / l * sin(theta)
def pendulum_th(t, l, th0):
    """Pendulum angle at time t for length l, released from rest at angle th0."""
    trajectory = odeint(pendulum_equations, (th0, 0), [0, t], args=(l,))
    return trajectory[-1, 0]
def pendulum_period(l, th0):
    """Period of a pendulum of length l with initial angle th0.

    Solves pendulum_th(t) == 0 for the quarter period, seeding fsolve with a
    quarter of the small-angle period 2*pi*sqrt(l/g).
    """
    quarter_guess = 2 * np.pi * sqrt(l / g) / 4
    quarter = fsolve(pendulum_th, quarter_guess, args=(l, th0))
    return quarter * 4
# Sweep initial angles from 0 to pi/2 and compare the numerically-solved
# period against the closed-form elliptic-integral solution.
ths = np.arange(0, np.pi/2.0, 0.01)
periods = [pendulum_period(1, th) for th in ths]
periods2 = 4*sqrt(1.0/g)*ellipk(np.sin(ths/2)**2) # exact period via the complete elliptic integral of the first kind
pl.plot(ths, periods, label=u"fsolve计算的单摆周期", linewidth=4.0)
pl.plot(ths, periods2, "r", label=u"单摆周期精确值", linewidth=2.0)
pl.legend(loc='upper left')
pl.xlabel(u"初始摆角(弧度)")
pl.ylabel(u"摆动周期(秒)")
pl.show()
| [
"zhwenrong@sina.com"
] | zhwenrong@sina.com |
1f43950d64ce5a38f3b7d2a3519a97f7f52bac9e | 1141cd4aeffafe496bb7d8a1399ca7c8445edd6e | /tests/functional/test_yet_another_tests.py | a3516905a07d5a3c2a61ef4aa4ab70e912400f88 | [
"Apache-2.0"
] | permissive | amleshkov/adcm | d338c3b7c51e38ffe9a0b2715c85e54bed0c4f46 | e1c67e3041437ad9e17dccc6c95c5ac02184eddb | refs/heads/master | 2020-11-30T15:35:57.456194 | 2019-12-16T20:27:06 | 2019-12-16T20:27:06 | 230,432,278 | 0 | 0 | NOASSERTION | 2019-12-27T11:30:23 | 2019-12-27T11:30:22 | null | UTF-8 | Python | false | false | 1,543 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import coreapi
import pytest
# pylint: disable=W0611, W0621
from tests.library import steps
from tests.library.errorcodes import BUNDLE_ERROR, INVALID_OBJECT_DEFINITION
# Directory holding the fixture bundles used by the tests below.
BUNDLES = os.path.join(os.path.dirname(__file__), "stacks/")
# NOTE: ("cluster") is a parenthesized string, not a 1-tuple, so parametrize
# receives plain strings — which is what the tests below expect.
testcases = [
    ("cluster"),
    ("host")
]
@pytest.mark.parametrize('testcase', testcases)
def test_handle_unknown_words_in_bundle(client, testcase):
    """Uploading a bundle whose definition contains unknown keys must fail."""
    bundle_dir = os.path.join(BUNDLES, 'unknown_words_in_' + testcase + '_bundle')
    with pytest.raises(coreapi.exceptions.ErrorMessage) as raised:
        steps.upload_bundle(client, bundle_dir)
    INVALID_OBJECT_DEFINITION.equal(raised, 'Not allowed key', 'in ' + testcase)
INVALID_OBJECT_DEFINITION.equal(e, 'Not allowed key', 'in ' + testcase)
def test_shouldnt_load_same_bundle_twice(client):
bundledir = os.path.join(BUNDLES, 'bundle_directory_exist')
steps.upload_bundle(client, bundledir)
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
steps.upload_bundle(client, bundledir)
BUNDLE_ERROR.equal(e, 'bundle directory', 'already exists')
| [
"cab@arenadata.io"
] | cab@arenadata.io |
d47af8dbdf1a77b0a251514121261421f9fa680b | 7477ca97110f1a173b04029a4ad710cf15627b63 | /prep.py | 6d57d22a683d4e2e8e918b9ba7bb57661a68c339 | [] | no_license | bkj/nbsgd | d51e64ad269c94b85f4b6397de66bc00066126d0 | 0cf58b28322e8a213ebbc00b6f9e878be092dad5 | refs/heads/master | 2021-09-19T10:18:36.946661 | 2018-07-26T17:48:35 | 2018-07-26T17:48:35 | 113,483,964 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,655 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
nbsgd.py
"""
from __future__ import print_function, division
import os
import re
import sys
import string
import argparse
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.feature_extraction.text import CountVectorizer
# --
# Helpers
def texts_from_folders(src, names):
    """Load labeled texts from per-class folders.

    Reads every file under ``src/<name>`` (file names sorted) for each name in
    `names`; the label of a text is the index of its folder in `names`.

    Returns (texts, labels) where labels is a numpy int array aligned with texts.
    """
    texts, labels = [], []
    for idx, name in enumerate(names):
        path = os.path.join(src, name)
        for fname in sorted(os.listdir(path)):
            fpath = os.path.join(path, fname)
            # Bug fix: the original `open(fpath).read()` never closed its
            # handles, leaking one file descriptor per document.
            with open(fpath) as fh:
                texts.append(fh.read())
            labels.append(idx)
    return texts, np.array(labels)
def bow2adjlist(X, maxcols=None):
    """Convert a bag-of-words matrix into padded adjacency/count lists.

    Row r of `adjlist` holds the 1-based column indices of X's nonzeros in row
    r (0 = padding); `datlist` holds the matching counts. Optionally truncate
    both to `maxcols` columns.
    """
    coo = coo_matrix(X)
    _, per_row = np.unique(coo.row, return_counts=True)
    positions = np.hstack([np.arange(c) for c in per_row])
    adjlist = csr_matrix((coo.col + 1, (coo.row, positions)))
    datlist = csr_matrix((coo.data, (coo.row, positions)))
    if maxcols is not None:
        adjlist = adjlist[:, :maxcols]
        datlist = datlist[:, :maxcols]
    return adjlist, datlist
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--max-features', type=int, default=200000)
parser.add_argument('--max-words', type=int, default=1000)
parser.add_argument('--ngram-range', type=str, default='1,3')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
# --
# IO
print("prep.py: loading", file=sys.stderr)
text_train, y_train = texts_from_folders('data/aclImdb/train', ['neg', 'pos'])
text_val, y_val = texts_from_folders('data/aclImdb/test', ['neg', 'pos'])
# --
# Preprocess
print("prep.py: preprocessing", file=sys.stderr)
re_tok = re.compile('([%s“”¨«»®´·º½¾¿¡§£₤‘’])' % string.punctuation)
tokenizer = lambda x: re_tok.sub(r' \1 ', x).split()
vectorizer = CountVectorizer(
ngram_range=tuple(map(int, args.ngram_range.split(','))),
tokenizer=tokenizer,
max_features=args.max_features
)
X_train = vectorizer.fit_transform(text_train)
X_val = vectorizer.transform(text_val)
X_train_words, _ = bow2adjlist(X_train, maxcols=args.max_words)
X_val_words, _ = bow2adjlist(X_val, maxcols=args.max_words)
# --
# Save
print("prep.py: saving", file=sys.stderr)
np.save('./data/aclImdb/X_train', X_train)
np.save('./data/aclImdb/X_val', X_val)
np.save('./data/aclImdb/X_train_words', X_train_words)
np.save('./data/aclImdb/X_val_words', X_val_words)
np.save('./data/aclImdb/y_train', y_train)
np.save('./data/aclImdb/y_val', y_val)
| [
"bkj.322@gmail.com"
] | bkj.322@gmail.com |
4990c1d685cea7f2caba13a5affda77fb5f63742 | 0bf6ecbdebc7424a8946b29127d55c5bc1e7442e | /wetLab/migrations/0018_auto_20161107_1627.py | 8aaf712e6292ab09e4a16ebf06b67cda88e4bf56 | [] | no_license | dekkerlab/cLIMS | 2351a9c81f3e3ba982e073500a4a5cf2fd38ed51 | e76731032a5707027b53746a8f2cc9b01ab7c04e | refs/heads/master | 2021-03-27T06:28:49.718401 | 2017-10-10T19:22:33 | 2017-10-10T19:22:33 | 71,837,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-07 16:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wetLab', '0017_auto_20161107_1626'),
]
operations = [
migrations.RenameField(
model_name='treatmentrnai',
old_name='treatmentRnai_targetNucleotide_seq',
new_name='treatmentRnai_nucleotide_seq',
),
]
| [
"nanda@ankitas-mbp.ad.umassmed.edu"
] | nanda@ankitas-mbp.ad.umassmed.edu |
40550862ebcc5009dd432daf15c8a6c3f4ecfb55 | e94c3e02b390b7c37214218083e4c5b2ad622f60 | /算法与数据结构/LeetCode/逻辑与数学(Logic&Math)/679.24-点游戏.py | cf02b656c4d06bed7d5028e03cd98f9322998e85 | [
"MIT"
] | permissive | nomore-ly/Job | 1160e341d9c78c2f99846995893f0289f4e56cf6 | ff4fd24447e30e2d17f15696842e214fba7ad61b | refs/heads/master | 2023-06-21T00:23:47.594204 | 2021-07-23T07:29:47 | 2021-07-23T07:29:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | #
# @lc app=leetcode.cn id=679 lang=python3
#
# [679] 24 点游戏
#
# @lc code=start
class Solution:
def judgePoint24(self, nums: List[int]) -> bool:
if not nums: return False
def helper(nums):
if len(nums) == 1: return abs(nums[0]-24) < 1e-6
for i in range(len(nums)):
for j in range(len(nums)):
if i != j:
newnums = [nums[k] for k in range(len(nums)) if i != k != j]
if helper(newnums + [nums[i]+nums[j]]): return True
if helper(newnums + [nums[i]-nums[j]]): return True
if helper(newnums + [nums[i]*nums[j]]): return True
if nums[j] != 0 and helper(newnums + [nums[i]/nums[j]]): return True
return False
return helper(nums)
# @lc code=end
| [
"xiaoqi25478@foxmail.com"
] | xiaoqi25478@foxmail.com |
b914cb6d66206ebc1109e2bea312dded0e148325 | b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a | /examples/pwr_run/checkpointing/nonpc_short/timed_feedback/job45.py | 93e06ba131127884d7597be0b085b07ce2087fd4 | [
"MIT"
] | permissive | boringlee24/keras_old | 3bf7e3ef455dd4262e41248f13c04c071039270e | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | refs/heads/master | 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,902 | py | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.mobilenet_v2 import MobileNetV2
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 32
args_lr = 0.003
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_feedback/' + job_name + '*'
total_epochs = 95
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_feedback/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
def on_epoch_end(self, epoch, logs=None):
# send message of epoch end
message = job_name + ' epoch_end'
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| [
"baolin.li1994@gmail.com"
] | baolin.li1994@gmail.com |
e58c556098cbe8831eb24b68a3f714a0be5f5068 | 084c3246c44c2e5ae5a0dd38522cb19ac993fe35 | /commands/cmd_inpart.py | 452945864ed9a09ac3933101280315646afc25b9 | [] | no_license | archivest/PythonWars-1996 | 5bafaca65764ca0d0999b063a5411c53cdbbb0eb | b2b301233d72334cfd9b4404b32a45ac22f0b248 | refs/heads/master | 2023-02-06T09:53:32.464771 | 2020-12-30T07:37:03 | 2020-12-30T07:37:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,137 | py | # PythonWars copyright © 2020 by Paul Penner. All rights reserved. In order to
# use this codebase you must comply with all licenses.
#
# Original Diku Mud copyright © 1990, 1991 by Sebastian Hammer,
# Michael Seifert, Hans Henrik Stærfeldt, Tom Madsen, and Katja Nyboe.
#
# Merc Diku Mud improvements copyright © 1992, 1993 by Michael
# Chastain, Michael Quan, and Mitchell Tse.
#
# GodWars improvements copyright © 1995, 1996 by Richard Woolcock.
#
# ROM 2.4 is copyright 1993-1998 Russ Taylor. ROM has been brought to
# you by the ROM consortium: Russ Taylor (rtaylor@hypercube.org),
# Gabrielle Taylor (gtaylor@hypercube.org), and Brian Moore (zump@rom.org).
#
# Ported to Python by Davion of MudBytes.net using Miniboa
# (https://code.google.com/p/miniboa/).
#
# In order to use any part of this Merc Diku Mud, you must comply with
# both the original Diku license in 'license.doc' as well the Merc
# license in 'license.txt'. In particular, you may not remove either of
# these copyright notices.
#
# Much time and thought has gone into this software, and you are
# benefiting. We hope that you share your changes too. What goes
# around, comes around.
import game_utils
import interp
import merc
def cmd_inpart(ch, argument):
argument, arg1 = game_utils.read_word(argument)
argument, arg2 = game_utils.read_word(argument)
if ch.is_npc():
return
if not ch.is_demon() and not ch.special.is_set(merc.SPC_CHAMPION):
ch.huh()
return
if not arg1 or not arg2:
ch.send("Syntax: Inpart <person> <power>\n"
"Fangs (2500), Claws (2500), Horns (2500), Hooves (1500), Nightsight (3000),\n"
"Wings (1000), Might (7500), Toughness (7500), Speed (7500), Travel (1500),\n"
"Scry (7500), Shadowsight (7500), Move (500), Leap (500), Magic (1000),\n"
"Lifespan (100), Pact (0), Prince (0), Longsword (0), Shortsword (0).\n")
return
victim = ch.get_char_world(arg1)
if not victim:
ch.not_here(arg1)
return
if victim.is_npc():
ch.not_npc()
return
if victim.level != merc.LEVEL_AVATAR or (victim != ch and not victim.special.is_set(merc.SPC_CHAMPION)):
ch.send("Only on a champion.\n")
return
if victim != ch and not game_utils.str_cmp(victim.lord, ch.name) and not game_utils.str_cmp(victim.lord, ch.lord) and victim.lord:
ch.send("They are not your champion.\n")
return
if game_utils.str_cmp(arg2, "pact"):
if ch == victim:
ch.not_self()
return
if victim.is_immortal():
ch.not_imm()
return
if victim.special.is_set(merc.SPC_SIRE):
victim.send("You have lost the power to make pacts!\n")
ch.send("You remove their power to make pacts.\n")
victim.special.rem_bit(merc.SPC_SIRE)
else:
victim.send("You have been granted the power to make pacts!\n")
ch.send("You grant them the power to make pacts.\n")
victim.special.set_bit(merc.SPC_SIRE)
victim.save(force=True)
return
if game_utils.str_cmp(arg2, "prince"):
if ch == victim:
ch.not_self()
return
if not ch.is_demon():
ch.send("Only the Demon Lord has the power to make princes.\n")
return
if victim.special.is_set(merc.SPC_PRINCE):
victim.send("You have lost your princehood!\n")
ch.send("You remove their princehood.\n")
victim.special.rem_bit(merc.SPC_PRINCE)
else:
victim.send("You have been made a prince!\n")
ch.send("You make them a prince.\n")
victim.special.set_bit(merc.SPC_PRINCE)
victim.save(force=True)
return
if game_utils.str_cmp(arg2, "longsword"):
victim.send("You have been granted the power to transform into a demonic longsword!\n")
ch.send("You grant them the power to transform into a demonic longsword.\n")
victim.powers[merc.DPOWER_OBJ_VNUM] = 29662
victim.save(force=True)
return
if game_utils.str_cmp(arg2, "shortsword"):
victim.send("You have been granted the power to transform into a demonic shortsword!\n")
ch.send("You grant them the power to transform into a demonic shortsword.\n")
victim.powers[merc.DPOWER_OBJ_VNUM] = 29663
victim.save(force=True)
return
inpart_list = [("fangs", merc.DEM_FANGS, 2500), ("claws", merc.DEM_CLAWS, 2500), ("horns", merc.DEM_HORNS, 2500), ("hooves", merc.DEM_HOOVES, 1500),
("nightsight", merc.DEM_EYES, 3000), ("wings", merc.DEM_WINGS, 1000), ("might", merc.DEM_MIGHT, 7500),
("toughness", merc.DEM_TOUGH, 7500), ("speed", merc.DEM_SPEED, 7500), ("travel", merc.DEM_TRAVEL, 1500),
("scry", merc.DEM_SCRY, 7500), ("shadowsight", merc.DEM_SHADOWSIGHT, 3000), ("move", merc.DEM_MOVE, 500),
("leap", merc.DEM_LEAP, 500), ("magic", merc.DEM_MAGIC, 1000), ("lifespan", merc.DEM_LIFESPAN, 100)]
for (aa, bb, cc) in inpart_list:
if game_utils.str_cmp(arg2, aa):
inpart = bb
cost = cc
break
else:
ch.inpart("")
return
if victim.dempower.is_set(inpart):
ch.send("They have already got that power.\n")
return
if ch.powers[merc.DEMON_TOTAL] < cost or ch.powers[merc.DEMON_CURRENT] < cost:
ch.send("You have insufficient power to inpart that gift.\n")
return
victim.dempower.set_bit(inpart)
ch.powers[merc.DEMON_TOTAL] -= cost
ch.powers[merc.DEMON_CURRENT] -= cost
if victim != ch:
victim.send("You have been granted a demonic gift from your patron!\n")
victim.save(force=True)
ch.send("Ok.\n")
ch.save(force=True)
interp.register_command(
interp.CmdType(
name="inpart",
cmd_fun=cmd_inpart,
position=merc.POS_STANDING, level=3,
log=merc.LOG_NORMAL, show=True,
default_arg=""
)
)
| [
"jindrak@gmail.com"
] | jindrak@gmail.com |
b92702a1b5b0504d907b5fcb0501caa038e81947 | d8ea695288010f7496c8661bfc3a7675477dcba0 | /django/nmq/nmq/wsgi.py | b82b5a963bfae62f596fb2edf68fab7627d0e166 | [] | no_license | dabolau/demo | de9c593dabca26144ef8098c437369492797edd6 | 212f4c2ec6b49baef0ef5fcdee6f178fa21c5713 | refs/heads/master | 2021-01-17T16:09:48.381642 | 2018-10-08T10:12:45 | 2018-10-08T10:12:45 | 90,009,236 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for nmq project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nmq.settings")
application = get_wsgi_application()
| [
"dabolau@qq.com"
] | dabolau@qq.com |
24e54a2ea97767851ac33f66c7cf051b025cca23 | afd3ff42313acba5cd9c00b987441465606cf24e | /Chapter6-SetsDics/mySets.py | 5dcf1cf909c06124ae077db3a199bb96de3a887a | [] | no_license | sweet23/oreillyPython1 | a2978d26e620daeacfd6bb6e4bfd9b7042bd808f | 81923c6c2597226ef420330a06385a030fb99bfa | refs/heads/master | 2020-12-11T03:36:08.799756 | 2015-10-25T18:20:36 | 2015-10-25T18:20:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | '''
Created on Jan 31, 2015
@author: jay wilner 3
'''
print ({1, 2, 3, 1, 2, 3, 1, 2, 3, 1});
languages = {"perl", "python", "c++", "ruby"}
print(languages);
languages.add('php')
print(languages)
print('is Perl in languages?')
print('Perl' in languages)
print(" is {'python', 'ruby'} < languages ?")
print({'python', 'java'} < languages) | [
"info@outofboundscommunications.com"
] | info@outofboundscommunications.com |
06a3e9a59e86df3bfc73abeaf8421a85968339c6 | 5be58a04fbbafa83e366d6919df8673ae6897fca | /services/python_services/app.py | fb226279da5ae1b8f5eef8c1df2433cb5f6b5a3f | [] | no_license | EricSchles/domain-scan-orchestration | 37d5b28d7f7f03e9bd46ce539dd98a288a5fccc1 | 802e473536aec97cb4fce73881277a9d4f82e1f3 | refs/heads/master | 2021-01-23T07:08:49.940954 | 2017-09-07T19:09:12 | 2017-09-07T19:09:12 | 102,497,544 | 0 | 0 | null | 2017-09-05T15:19:35 | 2017-09-05T15:19:35 | null | UTF-8 | Python | false | false | 783 | py | from flask import Flask, request
import json
import subprocess
from web_design_standards_check import uswds_checker
app = Flask(__name__)
@app.route("/", methods=["GET","POST"])
def index():
return "whatever"
@app.route("/services/web-design-standards", methods=["GET", "POST"])
def services():
domain = request.args.get("domain")
return json.dumps(uswds_checker(domain))
@app.route("/services/pshtt", methods=["GET","POST"])
def pshtt():
result = subprocess.run(["pshtt", "whitehouse.gov"], stdout=subprocess.PIPE)
return result.stdout
@app.route("/services/command_test", methods=["GET","POST"])
def command_test():
result = subprocess.run(["test_command"], stdout=subprocess.PIPE)
return result.stdout
if __name__ == '__main__':
app.run()
| [
"ericschles@gmail.com"
] | ericschles@gmail.com |
c40feab46c498feab832014eeb2903169b874151 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/011eac1d9242903f4e2db5ccccc74d97ad1e7cb9-<update_subscriptions>-bug.py | 4e65c2c1b62f17801bf04586a963ae137d2cf185 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py |
def update_subscriptions():
try:
f = open(options.stream_file_path, 'r')
public_streams = simplejson.loads(f.read())
f.close()
except:
logger.exception('Error reading public streams:')
return
classes_to_subscribe = set()
for stream in public_streams:
zephyr_class = stream.encode('utf-8')
if ((options.shard is not None) and (not hashlib.sha1(zephyr_class).hexdigest().startswith(options.shard))):
continue
if (zephyr_class in current_zephyr_subs):
continue
classes_to_subscribe.add((zephyr_class, '*', '*'))
if (len(classes_to_subscribe) > 0):
zephyr_bulk_subscribe(list(classes_to_subscribe))
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
f5c955619eac91433d16f01da67789e821c60523 | 5b37d86af518b90cb848233c7f5f53befc15a5ed | /training_xvector.py | 9208238a0b168e05113d2f68ecb2aa7f345b1813 | [
"MIT"
] | permissive | taalua/x-vector-pytorch | 45fce3606eeb0b9a996179a1e0242d62e8393bcd | 7d86f78a1a70974df490ef7d2629de2d71dd1558 | refs/heads/master | 2023-07-21T04:47:45.596582 | 2021-08-25T17:58:58 | 2021-08-25T17:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | from x_vectors.trainer import Trainer, Args
args=Args()
args.get_args()
args.loss_fun='AngleLoss'
# args.loss_fun='CrossEntropyLoss'
# args.loss_fun='AngularLoss'
# args.loss_fun='AdMSoftmaxLoss'
args.batch_size=3
trainer=Trainer(args)
#train
trainer.train(10) | [
"ristohinno@gmail.com"
] | ristohinno@gmail.com |
81768dc5d05076cdf91074bb336ea3aa70e5612b | 55a4d7ed3ad3bdf89e995eef2705719ecd989f25 | /main/law/spark_part/spark_part_limai_and_wenshu_origin/spark_part_main_74_102_laws_doc_judgment2_keshihua.py | d13cf17bf5872bc4381c7c87c4960e72bad18244 | [] | no_license | ichoukou/Bigdata | 31c1169ca742de5ab8c5671d88198338b79ab901 | 537d90ad24eff4742689eeaeabe48c6ffd9fae16 | refs/heads/master | 2020-04-17T04:58:15.532811 | 2018-12-11T08:56:42 | 2018-12-11T08:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,921 | py | # -*- coding: utf-8 -*-
"""
对抓取的文书内容进行数据提取
"""
import re
from pyspark import SparkContext,SparkConf
from pyspark.sql import SQLContext
from pyspark.sql.types import *
import json
import pymysql
from lxml import etree
import HTMLParser
import uuid as UUID
import time
if __name__ == "__main__":
# PYSPARK_PYTHON = "C:\\Python27\\python.exe" #多版本python情况下,需要配置这个变量指定使用哪个版本
# os.environ["PYSPARK_PYTHON"] = PYSPARK_PYTHON
conf = SparkConf()
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
# sc.setLogLevel("ERROR") # ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN '(select id,uuid,plaintiff_info,defendant_info from tmp_lawyers ) tmp'
# df = sqlContext.read.jdbc(url='jdbc:mysql://192.168.10.22:3306/laws_doc', table='(select id,uuid,doc_content from adjudication_civil where id >= 1010 and id <= 1041 and doc_from = "limai" ) tmp',column='id',lowerBound=1,upperBound=1800000,numPartitions=1,properties={"user": "tzp", "password": "123456"})
df = sqlContext.read.jdbc(url='jdbc:mysql://192.168.74.102:3306/laws_doc2', table='(select id,uuid,province,city,if_surrender,if_nosuccess,if_guity,if_accumulate,if_right,if_team,if_adult,age_year,org_plaintiff,org_defendant,dispute,court_cate,if_delay,duration,history,history_title,plaintiff_id,defendant_id,lawyer_id,lawyer from judgment2_main_etl ) tmp',column='id',lowerBound=1,upperBound=42588,numPartitions=10,properties={"user": "weiwc", "password": "HHly2017."})
def p(x):
print type(x),x
# print type(x[0]),type(x[1]),type(x[2]),type(x[3]),type(x[4])
# if len(x) >6:
# print x[0],x[1],x[2],x[3],x[4],x[5],x[6]
# else:print x[0],x[1],x[2],x[3],x[4],x[5]
def filter_(x):
if x :
return True
return False
def doc_items(items):
uuid = unicode(UUID.uuid3(UUID.NAMESPACE_DNS2, items[1].encode("utf8"))).replace("-", "")
return (items[0],uuid,items[2],items[3],items[4],
items[5],items[6],items[7],items[8],items[9],
items[10],items[11],items[12],items[13],items[14],
items[15],items[16],items[17],items[18],
items[19],items[20],items[21],items[22],items[23])
lawyer_k_v = df.map(lambda x:x).map(lambda x:doc_items(x))
# id, uuid, province, city, if_surrender, \
# if_nosuccess, if_guity, if_accumulate, if_right, \
# if_team, if_adult, age_year, org_plaintiff, org_defendant,\
# dispute, court_cate, if_delay, age_min, duration, history, \
# history_title, judge, plaintiff_id,\
# defendant_id, lawyer_id, lawyer,
schema = StructType([StructField("id", IntegerType(), False)
,StructField("uuid", StringType(), False)
,StructField("province", StringType(), True)
,StructField("city", StringType(), True)
,StructField("if_surrender", StringType(), True)
,StructField("if_nosuccess", StringType(), True)
,StructField("if_guity", StringType(), True)
,StructField("if_accumulate", StringType(), True)
,StructField("if_right", StringType(), True)
, StructField("if_team", StringType(), True)
, StructField("if_adult", StringType(), True)
, StructField("age_year", StringType(), True)
, StructField("org_plaintiff", StringType(), True)
, StructField("org_defendant", StringType(), True)
, StructField("dispute", StringType(), True)
, StructField("court_cate", StringType(), True)
, StructField("if_delay", StringType(), True)
, StructField("duration", StringType(), True)
, StructField("history", StringType(), True)
, StructField("history_title", StringType(), True)
, StructField("plaintiff_id", StringType(), True)
, StructField("defendant_id", StringType(), True)
, StructField("lawyer_id", StringType(), True)
, StructField("lawyer", StringType(), True)
])
f = sqlContext.createDataFrame(lawyer_k_v, schema=schema)
# f.show()
# , mode = "overwrite"
# useUnicode = true & characterEncoding = utf8,指定写入mysql时的数据编码,否则会乱码。
f.write.jdbc(url='jdbc:mysql://cdh5-slave2:3306/laws_doc_judgment?useUnicode=true&characterEncoding=utf8', table='judgment2_keshihua_only',properties={"user": "weiwc", "password": "HHly2017."})
sc.stop()
| [
"985819225@qq.com"
] | 985819225@qq.com |
8760e551048186bc85514da491a6a322ad8c4336 | e44ff4069f5b559954e7a66685c86b054a70de7a | /Practice Codes/CRDGAME2.py | 05854a976dad0fa141abd93f375f7b9e5497db8c | [] | no_license | SayanDutta001/Competitive-Programming-Codes | 2912985e037f83bcde8e7fcb0036f1e31fa626df | 6dac061c0a4b1c5e82b99ec134e9e77606508e15 | refs/heads/master | 2023-03-17T04:25:47.507594 | 2021-03-05T16:23:09 | 2021-03-05T16:23:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from math import pow
def trumpcard(n, a):
global mod
maxi = max(a)
turn = 1
cmaxi = a.count(maxi)
res = int(pow(2, n))%mod
if(cmaxi%2==0):
mini = min(cmaxi//2, cmaxi-cmaxi//2)
for i in range(mini):
turn = int(((turn%mod)*(cmaxi-i)%mod))%mod
turn = int(((turn%mod) * (pow(i+1, mod-2)%mod)))%mod
res -= ((pow(2, n-cmaxi)%mod)*(turn)%mod)%mod
if(res<0):
return (int(res)+mod)%mod
return int(res)%mod
test = int(input())
mod = 1000000007
for i in range(test):
n = int(input())
a = list(map(int, input().split()))
if(n==1):
print(2)
continue
print(trumpcard(n, a)) | [
"khanujabhupinder09@gmail.com"
] | khanujabhupinder09@gmail.com |
368e1d63d8dc911340339dc2b8dd3dbdb2a87eed | 40b42ccf2b6959d6fce74509201781be96f04475 | /tools/data/textdet/ic11_converter.py | 84f0099bad10425a4b6d5f8639b3a75a5cab47a0 | [
"Apache-2.0"
] | permissive | xdxie/WordArt | 2f1414d8e4edaa89333353d0b28e5096e1f87263 | 89bf8a218881b250d0ead7a0287526c69586c92a | refs/heads/main | 2023-05-23T02:04:22.185386 | 2023-03-06T11:51:43 | 2023-03-06T11:51:43 | 515,485,694 | 106 | 12 | null | null | null | null | UTF-8 | Python | false | false | 4,860 | py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import mmcv
from PIL import Image
from mmocr.utils import convert_annotations
def convert_gif(img_path):
"""Convert the gif image to png format.
Args:
img_path (str): The path to the gif image
"""
img = Image.open(img_path)
dst_path = img_path.replace('gif', 'png')
img.save(dst_path)
os.remove(img_path)
print(f'Convert {img_path} to {dst_path}')
def collect_files(img_dir, gt_dir):
"""Collect all images and their corresponding groundtruth files.
Args:
img_dir (str): The image directory
gt_dir (str): The groundtruth directory
Returns:
files (list): The list of tuples (img_file, groundtruth_file)
"""
assert isinstance(img_dir, str)
assert img_dir
assert isinstance(gt_dir, str)
assert gt_dir
ann_list, imgs_list = [], []
for img in os.listdir(img_dir):
img_path = osp.join(img_dir, img)
# mmcv cannot read gif images, so convert them to png
if img.endswith('gif'):
convert_gif(img_path)
img_path = img_path.replace('gif', 'png')
imgs_list.append(img_path)
ann_list.append(osp.join(gt_dir, 'gt_' + img.split('.')[0] + '.txt'))
files = list(zip(sorted(imgs_list), sorted(ann_list)))
assert len(files), f'No images found in {img_dir}'
print(f'Loaded {len(files)} images from {img_dir}')
return files
def collect_annotations(files, nproc=1):
"""Collect the annotation information.
Args:
files (list): The list of tuples (image_file, groundtruth_file)
nproc (int): The number of process to collect annotations
Returns:
images (list): The list of image information dicts
"""
assert isinstance(files, list)
assert isinstance(nproc, int)
if nproc > 1:
images = mmcv.track_parallel_progress(
load_img_info, files, nproc=nproc)
else:
images = mmcv.track_progress(load_img_info, files)
return images
def load_img_info(files):
"""Load the information of one image.
Args:
files (tuple): The tuple of (img_file, groundtruth_file)
Returns:
img_info (dict): The dict of the img and annotation information
"""
assert isinstance(files, tuple)
img_file, gt_file = files
# read imgs while ignoring orientations
img = mmcv.imread(img_file, 'unchanged')
img_info = dict(
file_name=osp.join(osp.basename(img_file)),
height=img.shape[0],
width=img.shape[1],
segm_file=osp.join(osp.basename(gt_file)))
if osp.splitext(gt_file)[1] == '.txt':
img_info = load_txt_info(gt_file, img_info)
else:
raise NotImplementedError
return img_info
def load_txt_info(gt_file, img_info):
"""Collect the annotation information.
The annotation format is as the following:
left, top, right, bottom, "transcription"
Args:
gt_file (str): The path to ground-truth
img_info (dict): The dict of the img and annotation information
Returns:
img_info (dict): The dict of the img and annotation information
"""
anno_info = []
with open(gt_file, 'r') as f:
lines = f.readlines()
for line in lines:
xmin, ymin, xmax, ymax = line.split(',')[0:4]
x = max(0, int(xmin))
y = max(0, int(ymin))
w = int(xmax) - x
h = int(ymax) - y
bbox = [x, y, w, h]
segmentation = [x, y, x + w, y, x + w, y + h, x, y + h]
anno = dict(
iscrowd=0,
category_id=1,
bbox=bbox,
area=w * h,
segmentation=[segmentation])
anno_info.append(anno)
img_info.update(anno_info=anno_info)
return img_info
def parse_args():
parser = argparse.ArgumentParser(
description='Generate training and test set of IC11')
parser.add_argument('root_path', help='Root dir path of IC11')
parser.add_argument(
'--nproc', default=1, type=int, help='Number of process')
args = parser.parse_args()
return args
def main():
args = parse_args()
root_path = args.root_path
for split in ['training', 'test']:
print(f'Processing {split} set...')
with mmcv.Timer(print_tmpl='It takes {}s to convert annotation'):
files = collect_files(
osp.join(root_path, 'imgs', split),
osp.join(root_path, 'annotations', split))
image_infos = collect_annotations(files, nproc=args.nproc)
convert_annotations(
image_infos, osp.join(root_path,
'instances_' + split + '.json'))
if __name__ == '__main__':
main()
| [
"xudongxie77@gmail.com"
] | xudongxie77@gmail.com |
01f5dde1701543d02a96075a4da8cbe8ec20d8aa | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03457/s520758102.py | 5d402c644ef6ae585d4175be890b33171eb27682 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | N = int(input())
t = [list(map(int,input().split())) for _ in range(N)]
x,k,y = 0,0,0
for i in range(N):
if (abs(t[i][1] - x) + abs(t[i][2] - y) > t[i][0] - k
or (abs(t[i][1] - x) + abs(t[i][2] - y))%2 != (t[i][0] - k)%2):
print("No")
exit()
x = t[i][1]
y = t[i][2]
k = t[i][0]
print("Yes") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2c8b64938a70966035ad6d45f9492406d100d4df | f914e863f4eae0c6474f8b8150a40969fc8e1a86 | /pyblast/blast/blast_parser.py | 3e1dc1285381ce2dcd7ae4dd27266f19fd316c5e | [
"MIT"
] | permissive | jvrana/pyblast | 0ff430f3f1f51d5b2118e1749f74e1dc8ff3b67f | 0f7ee7575e97470bfd05a2373d9c68247ec4ead0 | refs/heads/master | 2021-06-03T23:04:03.734484 | 2020-09-11T22:46:31 | 2020-09-11T22:46:31 | 102,554,910 | 0 | 0 | MIT | 2020-09-11T20:15:01 | 2017-09-06T02:49:51 | Python | UTF-8 | Python | false | false | 4,153 | py | """blast_parser."""
import re
class BlastResultParser:
"""Parses blast results."""
@staticmethod
def str_to_f_to_i(v):
try:
return int(v)
except ValueError:
try:
return float(v)
except ValueError:
pass
return v
@staticmethod
def _extract_metadata(r, delim):
"""Extracts information from the raw text file BLAST produces."""
g = re.search(
"#\\s*(?P<blast_ver>.+)\n"
+ "# Query:\\s*(?P<query>.*)\n"
+ "# Database:\\s*(?P<database>.+)\n"
+ r"(?:# Fields:\s*(?P<fields>.+))?",
r,
)
metadata = g.groupdict()
if metadata["fields"] is None:
return metadata
fields_array = re.split(r"\s*{}\s*".format(delim), metadata["fields"])
metadata["fields"] = fields_array
return metadata
@staticmethod
def _get_alignment_rows(r):
"""Split text into alignment rows."""
return re.findall("\n([^#].*)", r)
@classmethod
def _validate_matches(cls, raw_matches, fields):
"""Create a dictionary from the fields and rows."""
match_dicts = []
for m in raw_matches:
values = [cls.str_to_f_to_i(v) for v in m.split("\t")]
match_dicts.append(dict(list(zip(fields, values))))
return match_dicts
@staticmethod
def __convert_strand_label(strand_lable):
if strand_lable.strip().lower() != "plus":
return -1
return 1
@classmethod
def __clean_json(cls, data_list):
for data in data_list:
query = dict()
subject = dict()
query["start"] = data["q. start"]
query["end"] = data["q. end"]
query["bases"] = data["query seq"]
query["strand"] = cls.__convert_strand_label(
data.get("query strand", "plus")
)
query["length"] = data["query length"]
query["sequence_id"] = data["query acc."]
subject["start"] = data["s. start"]
subject["end"] = data["s. end"]
subject["bases"] = data["subject seq"]
subject["strand"] = cls.__convert_strand_label(data["subject strand"])
subject["length"] = data["subject length"]
subject["sequence_id"] = data["subject acc."]
meta = dict(data)
yield {"query": query, "subject": subject, "meta": meta}
@classmethod
def raw_results_to_json(cls, raw_text, delim=","):
"""Converts raw BLAST text into a flatten dictionary.
:param raw_text: raw text from BLAST results
:type raw_text: basestring
:param delim: delimiter for parsing
:type delim: basestring
:return: flattened dictionary
:rtype: dict
"""
if raw_text.strip() == "":
return []
meta = cls._extract_metadata(raw_text, delim)
fields = meta["fields"]
if fields is None:
return []
alignment_rows = cls._get_alignment_rows(raw_text)
match_dicts = cls._validate_matches(alignment_rows, tuple(fields))
data = list(cls.__clean_json(match_dicts))
return data
@staticmethod
def get_perfect(data):
"""Returns only exact matches.
:return:
:rtype:
"""
def no_gaps(x):
return x["meta"]["gaps"] == 0
def no_gap_opens(x):
return x["meta"]["gap opens"] == 0
def identical(x):
return x["meta"]["identical"] == x["meta"]["alignment length"]
def perfect(x):
return all([no_gaps(x), no_gap_opens(x), identical(x)])
return [r for r in data if perfect(r)]
@staticmethod
def get_with_perfect_subjects(data):
"""Returns only parsed alignments with 100% of the subject aligning to
the query.
:return: perfect alignments
:rtype:
"""
def f(x):
return x["meta"]["alignment_length"] == x["subject"]["length"]
return [r for r in data if f(r)]
| [
"justin.vrana@gmail.com"
] | justin.vrana@gmail.com |
9d18bc90a2fcc735b729a64bea157f50ea910245 | d8fd66452f17be82b964f9a93577dbaa2fa23451 | /ProxyPool/crawlers/BaseCrawler.py | 5c4d2652c82ed3aec63fcf89a522c66eea07f0e0 | [] | no_license | Dawinia/gp_DA_movie | 8b7575a54502896f5658538563f3f1f8cfe38772 | e0253cc8bc16daf1d32b9c861f7fcb03510937f6 | refs/heads/master | 2023-05-25T11:49:39.422657 | 2021-12-14T03:26:35 | 2021-12-14T03:26:35 | 233,504,205 | 3 | 0 | null | 2023-05-22T22:44:11 | 2020-01-13T03:33:48 | Python | UTF-8 | Python | false | false | 868 | py | # encoding: utf-8
"""
@author: dawinia
@time: 2020/4/21 上午1:13
@file: BaseCrawler.py
@desc:
"""
import requests
from lxml import etree
from ProxyPool.settings import HEADER
from retrying import retry
class BaseCrawler:
urls = []
def __init__(self):
self.headers = HEADER
@retry(stop_max_attempt_number=3, retry_on_result=lambda x: x is None)
def fetch(self, url, **kwargs):
try:
response = requests.get(url, headers=self.headers)
if response.status_code == 200:
return etree.HTML(response.text)
except requests.ConnectionError:
return
def crawl(self):
"""
get proxy from xicidaili
:return:
"""
for url in self.urls:
html = self.fetch(url)
for proxy in self.parse(html):
yield proxy
| [
"dawinialo@163.com"
] | dawinialo@163.com |
a63cb2d245bffaf953cf535d4278eb62d911c3b9 | 3c7057226c7bb01cd493cde5742b3979cf030f94 | /tests/unit/cli/commands/test_config.py | f0925fbe071a7bfee708495483c65fb2ccab507a | [
"Apache-2.0"
] | permissive | sharadc2001/lmctl | 2d047f776d1bbee811801ccc5454a097b1484841 | a220a3abeef5fc1f7c0a9410524625c2ff895a0a | refs/heads/master | 2023-05-27T06:14:49.425793 | 2021-04-29T20:08:52 | 2021-04-29T20:08:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,908 | py | from .command_testing import CommandTestCase
from lmctl.cli.entry import cli
from lmctl.config import ConfigParser, Config
from unittest.mock import patch
import tempfile
import os
import shutil
test_config = '''\
environments:
default:
tnco:
address: http://example
secure: True
client_id: Tester
'''
class TestConfigTarget(CommandTestCase):
def setUp(self):
super().setUp()
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
if os.path.exists(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
def test_created_config_is_parsable(self):
target_path = os.path.join(self.tmp_dir, 'config.yaml')
result = self.runner.invoke(cli, ['create', 'config', '--path', target_path])
self.assertTrue(os.path.exists(target_path), msg='Config file not created')
config = ConfigParser().from_file(target_path)
self.assertIsInstance(config, Config)
self.assertIn('default', config.environments)
def test_create_at_path(self):
target_path = os.path.join(self.tmp_dir, 'config.yaml')
result = self.runner.invoke(cli, ['create', 'config', '--path', target_path])
self.assertTrue(os.path.exists(target_path), msg='Config file not created')
def test_create_at_path_overwrite(self):
target_path = os.path.join(self.tmp_dir, 'config.yaml')
with open(target_path, 'w') as f:
f.write('Existing content')
result = self.runner.invoke(cli, ['create', 'config', '--path', target_path, '--overwrite'])
config = ConfigParser().from_file(target_path)
self.assertIsInstance(config, Config)
def test_create_at_path_that_exists_fails(self):
target_path = os.path.join(self.tmp_dir, 'config.yaml')
with open(target_path, 'w') as f:
f.write('Existing content')
result = self.runner.invoke(cli, ['create', 'config', '--path', target_path])
self.assert_has_system_exit(result)
expected_output = f'Error: Cannot create configuration file at path "{target_path}" because there is already a file there and "--overwrite" was not set'
self.assert_output(result, expected_output)
def test_create_also_creates_directory(self):
target_dir = os.path.join(self.tmp_dir, 'somedir')
target_path = os.path.join(target_dir, 'config.yaml')
result = self.runner.invoke(cli, ['create', 'config', '--path', target_path])
self.assertTrue(os.path.exists(target_dir), msg='Config directory not created')
self.assertTrue(os.path.isdir(target_dir), msg='Expected a directory to be created')
self.assertTrue(os.path.exists(target_path), msg='Config file not created')
config = ConfigParser().from_file(target_path)
self.assertIsInstance(config, Config)
def test_create_ok_with_existing_directory(self):
target_dir = os.path.join(self.tmp_dir, 'somedir')
os.makedirs(target_dir)
target_path = os.path.join(target_dir, 'config.yaml')
some_other_file = os.path.join(target_dir, 'someotherfile.yaml')
with open(some_other_file, 'w') as f:
f.write('Original content')
result = self.runner.invoke(cli, ['create', 'config', '--path', target_path])
self.assertTrue(os.path.exists(target_path), msg='Config file not created')
config = ConfigParser().from_file(target_path)
self.assertIsInstance(config, Config)
self.assertTrue(os.path.exists(some_other_file), msg='Existing file should not have been removed')
with open(some_other_file, 'r') as f:
content = f.read()
self.assertEqual(content, 'Original content')
@patch('lmctl.cli.commands.targets.config.ConfigFinder')
def test_create_at_default_directory(self, mock_finder):
default_dir = os.path.join(self.tmp_dir, 'defaultdir')
os.makedirs(default_dir)
default_path = os.path.join(default_dir, 'config.yaml')
mock_finder.return_value.get_default_config_path.return_value = default_path
result = self.runner.invoke(cli, ['create', 'config'])
self.assertTrue(os.path.exists(default_path), msg='Config file not created')
config = ConfigParser().from_file(default_path)
self.assertIsInstance(config, Config)
@patch('lmctl.cli.commands.targets.config.get_config')
def test_get(self, mock_get_config):
config_path = os.path.join(self.tmp_dir, 'config.yaml')
with open(config_path, 'w') as f:
f.write(test_config)
mock_get_config.return_value = (Config(), config_path)
result = self.runner.invoke(cli, ['get', 'config'])
self.assert_no_errors(result)
self.assert_output(result, test_config)
@patch('lmctl.cli.commands.targets.config.get_config')
def test_get_print_path(self, mock_get_config):
config_path = os.path.join(self.tmp_dir, 'config.yaml')
with open(config_path, 'w') as f:
f.write(test_config)
mock_get_config.return_value = (Config(), config_path)
result = self.runner.invoke(cli, ['get', 'config', '--print-path'])
self.assert_no_errors(result)
expected_output = f'Path: {config_path}'
expected_output += '\n---\n'
expected_output += test_config
self.assert_output(result, expected_output)
@patch('lmctl.cli.commands.targets.config.get_config')
def test_get_print_path_only(self, mock_get_config):
config_path = os.path.join(self.tmp_dir, 'config.yaml')
with open(config_path, 'w') as f:
f.write(test_config)
mock_get_config.return_value = (Config(), config_path)
result = self.runner.invoke(cli, ['get', 'config', '--path-only'])
self.assert_no_errors(result)
expected_output = config_path
self.assert_output(result, expected_output)
| [
"daniel.vaccaro-senna@ibm.com"
] | daniel.vaccaro-senna@ibm.com |
45b0809ced71943fe3f93497370ba9b6d5210ffd | fd63eec60cb9386cae6e786565c64a590a077dfb | /planet_bot/bottieverse/natural_language_processor.py | 48efd0639ff567a442241c6f7009bafb790e5c1c | [] | no_license | opemipoVRB/Planet-Bot | a8fd0b848331fdac675ffd38780ad93f9b83a356 | ac46aaec3b05021d1185ad3248faebaebbffe471 | refs/heads/master | 2020-04-01T09:55:39.962697 | 2019-01-15T21:25:09 | 2019-01-15T21:25:09 | 153,095,324 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,665 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
$$...........................................................................$$
$$..........................$$$$$$$$$$$$$....................................$$
$$.......................$$$$$$$$$$$$$$$$$$$.................................$$
$$.....................$$$$$$$$$$$$$$$$$$$$$$$...............................$$
$$....................$$$$$$$$$$$$$$$$$$$$$$$$$..............................$$
$$...................$$$$$$$$$$$$$$$$$$$$$$.$$...............................$$
$$...................$$$$$$$$$$$$$$$$$$$$$...$$..............................$$
$$...................$$$$$$$$$$$$$$$$$$.$$...$$$.............................$$
$$...................$$$$$$$$$$$$$$$$$$$$$$$$$$..............................$$
$$....................$$$$$$$$$$$$$.....$$$$$$$$$............................$$
$$......................$$$$$$$$$$$$$$$$..$$$$$$$............................$$
$$...................................$$$.....................................$$
$$.................$$................$$$$ $$$$$$$........$...................$$
$$...............$$$$$$..............$$$$$$$$$$$$$...$$$$$$..................$$
$$............$$$$..$$$$$.........................$$$$$$$$$..................$$
$$............$$$$...$$$$$$$....................$$$$$$.$$.$$.................$$
$$...............$$$$$$$$$$$$$$............$$$$$$$$..........................$$
$$.........................$$$$$$$$$...$$$$$$$...............................$$
$$..............................$$$$$$$$$$...................................$$
$$..........................$$$$$....$$$$$$$$$...............................$$
$$............$$.$$$$$$$$$$$$$............$$$$$$$$$$$$$$$$$..................$$
$$............$$.$$..$$$$.....................$$$$$$$$$$$$$$.................$$
$$..............$$$$$$............................$$.$$$$$$$.................$$
$$.................. ......................$$
$$.................. @@@ @@@ @@@@@@@ @@@@@@@ .......................$$
$$.................. @@@ @@@ @@@ @@@@ @@@ @@@@.....................$$
$$.................. @@! @@@ @@! !@@ @@! !@@......................$$
$$.................. !@! @!@ !@! !@! !@! !@!......................$$
$$.................. @!@ !@! !!@!@!!@@! !!@!@!!@@!.....................$$
$$.................. !@! !!! !!! !!! !!! !!!....................$$
$$.................. :!: !!: !!: :!! !!: :::....................$$
$$................... ::!!:! :!: :!: :!: :::....................$$
$$.................... :::: ::: ::: ::: :::....................$$
$$...................... : : : ::::::: ....................$$
$$...........................................................................$$
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
$$***************************************************************************$$
$$ natural_language_processor.py Created by Durodola Opemipo 2018 $$
$$ Personal Email : <opemipodurodola@gmail.com> $$
$$ Telephone Number: +2348182104309 $$
$$***************************************************************************$$
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
"""
| [
"opemipodurodola@gmail.com"
] | opemipodurodola@gmail.com |
49928abcbf8e7654bb4b23c2267fac3bd0d1f900 | 221b2221703f9cddeee7054c5dc426b81a3f53bd | /venv/lib/python3.9/site-packages/pony/orm/tests/pony_test_config.py | e17371c536e0109962126cacbc698ed946a963f4 | [] | no_license | ch3p4ll3/Royal-Racing-Bot | 37c998a650078e4b5f5c3b34b8c081d52b018944 | eab5baf61a9782fbedd42ddf35b7e11cbae9ec22 | refs/heads/main | 2023-06-26T03:34:58.104068 | 2021-07-30T17:36:14 | 2021-07-30T17:36:14 | 348,089,837 | 1 | 0 | null | 2021-03-20T11:32:46 | 2021-03-15T18:59:39 | Python | UTF-8 | Python | false | false | 277 | py | settings = dict(
provider = 'sqlite', filename = ':memory:'
# provider='postgres', user='pony', password='pony', host='localhost', port='5432', database='pony',
# provider='cockroach', user='root', host='localhost', port=26257, sslmode='disable', database='test'
)
| [
"slinucs@gmail.com"
] | slinucs@gmail.com |
5cba62895872e9e701325e7cebcfc0a76dd17ea5 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /gPJTSqmJ4qQPxRg5a_9.py | 589335772716b843414d88eabf4a6847eb7b156a | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py |
def func(num):
a=(list(str(num)))
b = len(a)
count = 0
for i in a:
count = count + (int(i)-b)
return count
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
600e60daec5c92452a456f9c96b11e8c7a7a69b7 | 58f81a20e6a22d17af626d423c6e1a5b160f784c | /services/core-api/app/api/now_applications/resources/unit_type_resource.py | 77fae773c5205096cf655c783f068ce023dbdfa0 | [
"Apache-2.0"
] | permissive | cryptobuks1/mds | 5e115c641dfa2d1a91097d49de9eeba1890f2b34 | 6e3f7006aeb5a93f061717e90846b2b0d620d616 | refs/heads/master | 2022-04-23T21:11:37.124243 | 2020-04-14T17:55:39 | 2020-04-14T17:55:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | from flask_restplus import Resource
from app.extensions import api
from app.api.utils.access_decorators import requires_role_view_all
from app.api.utils.resources_mixins import UserMixin
from app.api.now_applications.models.unit_type import UnitType
from app.api.now_applications.response_models import UNIT_TYPES
class UnitTypeResource(Resource, UserMixin):
@api.doc(description='Get a list of units and their codes. (ie degrees, meters etc)', params={})
@requires_role_view_all
@api.marshal_with(UNIT_TYPES, code=200, envelope='records')
def get(self):
return UnitType.get_active() | [
"bcgov-csnr-cd@gov.bc.ca"
] | bcgov-csnr-cd@gov.bc.ca |
62d171c9fc4af70e1cf3e93ca45a9173eed86f05 | 05341db2b66b544f1241eac8d459f90505c20e37 | /pyqt_distutils/__init__.py | c90267b7f0fee8e8fc5519470f35a931c4fb14a0 | [
"MIT"
] | permissive | xushoucai/pyqt_distutils | 4c9ee16113fcfa7d8f107933a6f99a4cc1e69554 | 83c95b6b7a37b612509f3caac98c715f10703724 | refs/heads/master | 2021-01-12T14:27:46.875463 | 2016-06-20T06:36:18 | 2016-06-20T06:36:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | """
A set of PyQt distutils extensions for build qt ui files in a pythonic way:
- build_ui: build qt ui/qrc files
"""
__version__ = '0.7.2'
| [
"colin.duquesnoy@gmail.com"
] | colin.duquesnoy@gmail.com |
f60ead416927933ce228c614991c92f42be50ac9 | bb2c530d891a95a5e93668ac3aa3bf71472c5909 | /PracticeWithFunctionsTestCases/test_coin_flip.py | d3cbac9ebe999bba7903143430fb6fc641e13217 | [] | no_license | http403/CS121 | 3e069805e53f2cda19427100225c3c4103f24f48 | 210fbd2d47fcdd63b7cb4c7b9ab1c9ef08c24b7a | refs/heads/master | 2023-03-06T06:41:33.546807 | 2020-03-09T21:09:08 | 2020-03-09T21:09:08 | 235,925,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # YOU CAN IGNORE THIS FILE ENTIRELY
from unittest import TestCase
from unittest.mock import patch
from functions import coin_flip
class TestCoinFlip(TestCase):
@patch('functions.print')
def test_coin_flip(self, mock_print):
coin_flip()
try:
mock_print.assert_called_with('Coin flip result is heads.')
except AssertionError:
mock_print.assert_called_with('Coin flip result is tails.') | [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
6528c8298b100f2a4c7f9b7942fa43168ff41697 | 39632b273847febed4e7c4f70f804b3c1bb05eb8 | /tpc/participate.py | f8bedf6c14c11299e48f3da31b0ba1413266e8fe | [] | no_license | MintYiqingchen/transaction-database | 6172e067efb21119dd7e8542010e379f41b656d2 | ffa73384693a5604cfc9c51a206570107beffebc | refs/heads/master | 2022-04-09T17:33:21.897370 | 2020-03-31T23:05:00 | 2020-03-31T23:05:00 | 237,696,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,631 | py | from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.client import ServerProxy, Error
from threading import Thread, Lock
import os
import sys
import argparse
import time
import socket
import psycopg2 as pg2
import psycopg2.pool
import asyncio
import traceback
from collections import defaultdict
parser = argparse.ArgumentParser()
parser.add_argument('--host', default="127.0.0.1")
parser.add_argument('--user', default="postgres")
parser.add_argument('--passwd', default="1z2x3c4v5b")
parser.add_argument('--database', default="tippers")
parser.add_argument('--port', default="5432")
parser.add_argument('--rpcport', default=15000, type=int)
parser.add_argument('--coordinator_uri', default="http://127.0.0.1:25000")
parser.add_argument('--thread_num', type=int, default=32)
parser.add_argument('--timeout', type=int, default=30)
args = parser.parse_args()
def statusItem():
return {'xid':None, 'status':'Init', 'task': None}
class Participate(object):
_rpc_methods_ = ['tpc_prepare', 'tpc_commit', 'tpc_abort', 'execute', 'wait_message']
def __init__(self, address, db_pool):
self.db_pool = db_pool
self._ip = address[0]
self._port = address[1]
self._serv = SimpleXMLRPCServer(address, allow_none=True)
for name in self._rpc_methods_:
self._serv.register_function(getattr(self, name))
self._status = defaultdict(statusItem) # txn_id --> ["Init", "Abort", "Prepare"]
self._locks = {} # txn_id --> threading.Lock
self._bigLock = Lock()
self._loop = asyncio.get_event_loop()
def recover_prepared_txn(self):
conn = self.db_pool.getconn()
uri = 'http://'+self._ip + ':'+str(self._port)
xids = conn.tpc_recover()
for xid in xids:
self._locks[xid.gtrid] = Lock()
self._status[xid.gtrid]['xid'] = xid
self._status[xid.gtrid]['status'] = 'Prepare'
key = list(self._status.keys())
print('After participate recover, txn_ids', key)
with ServerProxy(args.coordinator_uri, allow_none=True) as proxy:
for txn_id in key:
try:
res = proxy.recovery_message(txn_id, uri)
print('{} ask for txn_id {} op {}'.format(uri, txn_id, res['op']))
except ConnectionError as v:
print("Connection ERROR ", v)
continue
if res['op'] == 'COMMIT':
conn.tpc_commit(self._status[txn_id]['xid'])
del self._status[txn_id]
del self._locks[txn_id]
elif res['op'] == 'ABORT':
conn.tpc_rollback(self._status[txn_id]['xid'])
del self._status[txn_id]
del self._locks[txn_id]
self.db_pool.putconn(conn)
def wait_message(self, txn_id):
if txn_id not in self._locks:
return {'errCode': 0, 'isWait': 0}
return {'errCode': 0, 'isWait': 1}
def tpc_prepare(self, txn_id):
if txn_id not in self._locks:
return {'errCode': 0, 'vote': 0}
with self._locks[txn_id]:
self._status[txn_id]['task'].cancel()
if self._status[txn_id]['status'] == "Abort": # abort
return {'errCode': 0, 'vote': 0}
if self._status[txn_id]['status'] == "Prepare":
return {'errCode': 0, 'vote': 1}
conn = self.db_pool.getconn(txn_id)
conn.tpc_prepare()
self._status[txn_id]['status'] = 'Prepare'
return {'errCode': 0, 'vote': 1}
def tpc_abort(self, txn_id):
if txn_id not in self._locks:
return {'errCode': 0}
with self._locks[txn_id]:
if self._status[txn_id]['status'] == 'Prepare':
conn = self.db_pool.getconn(txn_id)
conn.tpc_rollback()
self.db_pool.putconn(conn, key = txn_id)
del self._status[txn_id]
del self._locks[txn_id]
return {'errCode': 0}
def tpc_commit(self, txn_id):
if txn_id not in self._locks:
return {'errCode': 0}
with self._locks[txn_id]:
if self._status[txn_id]['status'] == 'Prepare':
conn = self.db_pool.getconn(txn_id)
conn.tpc_commit()
self.db_pool.putconn(conn, key = txn_id)
del self._status[txn_id]
del self._locks[txn_id]
return {'errCode': 0}
def execute(self, txn_id, sql):
while True:
try:
conn = self.db_pool.getconn(txn_id)
break
except Exception as e:
print('Execute Error ', e)
time.sleep(25)
with self._bigLock:
if txn_id not in self._locks:
self._locks[txn_id] = Lock()
with self._locks[txn_id]:
if txn_id not in self._status:
xid = conn.xid(0, txn_id, 'pj2')
task = self._loop.call_later(args.timeout, serv.change_to_abort, txn_id)
self._status[txn_id] = {'xid': xid, 'status': 'Init', 'task': task}
conn.tpc_begin(xid)
elif self._status[txn_id]['status'] != "Init":
return {'errCode': 1, 'errString': "Participate status is "+self._status[txn_id]['status']}
try:
with conn.cursor() as curs:
curs.execute(sql)
except pg2.DatabaseError:
traceback.print_exc()
self._status[txn_id]['status'] = "Abort"
conn.tpc_rollback()
self.db_pool.putconn(conn, key=txn_id)
return {'errCode': 0}
def serve_forever(self):
self._serv.serve_forever()
def participate_register(self):
with ServerProxy(args.coordinator_uri, allow_none=True) as proxy:
uri = 'http://'+self._ip + ':'+str(self._port)
a = proxy.participate_register(uri)
def change_to_abort(self, txn_id):
if txn_id not in self._locks:
return
with self._locks[txn_id]:
if self._status[txn_id]['status'] != "Init":
return
conn = self.db_pool.getconn(txn_id)
conn.tpc_rollback()
self.db_pool.putconn(conn, key=txn_id)
self._status[txn_id]['status'] = 'Abort'
def timeout_loop(self):
try:
self._loop.run_forever()
except Exception:
self._loop.close()
if __name__ == '__main__':
global IP
try:
pgpool = psycopg2.pool.ThreadedConnectionPool(args.thread_num, 100,\
host = args.host, user=args.user, password=args.passwd, database=args.database, port=args.port)
except:
raise Exception("unable to connect to database")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
IP = s.getsockname()[0]
s.close()
print(IP)
serv = Participate((IP, args.rpcport), pgpool)
for _ in range(1, args.thread_num):
t = Thread(target=serv.serve_forever)
t.daemon = True
t.start()
t = Thread(target=serv.timeout_loop)
t.daemon = True
t.start()
serv.recover_prepared_txn()
serv.participate_register()
serv.serve_forever() | [
"821244608@qq.com"
] | 821244608@qq.com |
6fc75f5b8dadbafbb0a244d38751341b91a114ee | a2ef47ae30bbf44175aa2f3398f9d3f7358870a6 | /fnsservice/test4.py | 03b8c2c22a835406770415aea4cbb1513c9c0300 | [] | no_license | kmakeev/fnsservice | a1342f045441f0ee6683cfbbe1358d11bb3cf60e | 66d677ce6cc298d7404fb4db5ffe1162a5f493a3 | refs/heads/master | 2021-06-17T18:09:15.894765 | 2021-02-10T07:39:35 | 2021-02-10T07:39:35 | 243,219,506 | 2 | 1 | null | 2021-02-10T07:22:23 | 2020-02-26T09:11:34 | JavaScript | UTF-8 | Python | false | false | 221 | py | # -*- coding: utf-8 -*-
from bokeh.io import show, output_file
from fnsservice.fns.views import DublicatedGraph
g = DublicatedGraph(width=800, height=800, N=1500)
p = g.get_graph()
output_file("graph.html")
show(p)
| [
"kmakeev@arbitr.ru"
] | kmakeev@arbitr.ru |
3f778c20399ecda6ea8109817fb7a9c9d796ad82 | 0190835675f591b159463e50e7d814bbe5303d8d | /django_confit/loaders.py | dc9bfac0338e7af071529d7e3461273f47944a10 | [] | no_license | zebuline/django-confit | 56b46dafbe0aa0b067e80086dfbc953bdc44250f | ad9fcd458d8d6409066acd110e385c24f2f0fe37 | refs/heads/master | 2021-01-18T08:44:11.729820 | 2014-06-30T13:42:41 | 2014-06-30T13:42:41 | 32,524,209 | 0 | 0 | null | 2015-03-19T13:52:25 | 2015-03-19T13:52:24 | Python | UTF-8 | Python | false | false | 3,370 | py | # -*- coding: utf-8 -*-
"""Utilities to load configuration from various sources:
* from :attr:`os.environ` or similar dictionary:
:func:`settings_from_string_mapping`;
* from Python module: :func:`settings_from_module`;
* from JSON or YAML file: :func:`settings_from_file`.
"""
import json
import six
import yaml
def load_mapping(input, prefix=''):
"""Convert mapping of {key: string} to {key: complex type}.
This function makes it possible (and easy) to load complex types from
single-level key-value stores, such as environment variables or INI files.
Of course, both flat and nested mappings are supported:
>>> flat_mapping = {'DEBUG': 'True', 'SECRET_KEY': 'not a secret'}
>>> output = load_mapping(flat_mapping)
>>> output == flat_mapping
True
>>> nested_mapping = {'DATABASES': {'USER': 'me', 'HOST': 'localhost'}}
>>> output = load_mapping(nested_mapping)
>>> output == nested_mapping
True
Values can be complex types (sequences, mappings) using JSON or YAML.
Keys using ".json" or ".yaml" suffix are automatically decoded:
>>> nested_mapping = {
... 'DATABASES.yaml': 'ENGINE: sqlite3',
... }
>>> output = load_mapping(nested_mapping)
>>> output['DATABASES'] == {'ENGINE': 'sqlite3'}
True
You can use optional ``prefix`` argument to load only a subset of mapping:
>>> mapping = {'YES_ONE': '1', 'NO_TWO': '2'}
>>> load_mapping(mapping, prefix='YES_')
{'ONE': '1'}
"""
output = {}
for key, value in six.iteritems(input):
if key.startswith(prefix):
key = key[len(prefix):]
if key.endswith('.json'):
output[key[:-5]] = json.loads(value)
elif key.endswith('.yaml'):
output[key[:-5]] = yaml.load(value)
else:
output[key] = value
return output
def load_file(file_obj):
"""Return mapping from file object, using ``name`` attr to guess format.
Supported file formats are JSON and YAML. The lowercase extension is used
to guess the file type.
>>> from six.moves import StringIO
>>> file_obj = StringIO('SOME_LIST: [a, b, c]')
>>> file_obj.name = 'something.yaml'
>>> load_file(file_obj) == {
... 'SOME_LIST': ['a', 'b', 'c'],
... }
True
"""
file_name = file_obj.name
if file_name.endswith('.yaml'):
return yaml.load(file_obj)
elif file_name.endswith('.json'):
return json.load(file_obj)
else:
raise ValueError(
'Cannot guess format of configuration file "{name}". '
'Expected one of these extensions: "{extensions}".'.format(
name=file_name,
extensions='", "'.join('.yaml', '.json')))
def load_module(module_path):
"""Return module's globals as a dict.
>>> settings = load_module('django.conf.global_settings')
>>> settings['DATABASES']
{}
It does not load "protected" and "private" attributes (those with
underscores).
>>> '__name__' in settings
False
"""
module = __import__(module_path, fromlist='*', level=0)
is_uppercase = lambda x: x.upper() == x
is_special = lambda x: x.startswith('_')
return dict([(key, value) for key, value in module.__dict__.items()
if is_uppercase(key) and not is_special(key)])
| [
"benoit@marmelune.net"
] | benoit@marmelune.net |
059cb11799e13b9f2b0d70c4c4df93b82a2fad6f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_bleaching.py | 2f016290a8dda6c9ca2d17f70eb94990392f8f9d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#calss header
class _BLEACHING():
def __init__(self,):
self.name = "BLEACHING"
self.definitions = bleach
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['bleach']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e8a46ea11ce16518bff9bf364ce3f3ddbecfb4c9 | 153da69b35f032f5b83a06f17008ba41a1b336b4 | /src/demo/calculator/core/enums/calc_operations.py | be7a79dd2dc2c43523ceea7e9ca10aca5c105aa2 | [
"MIT"
] | permissive | TrendingTechnology/hspylib | 6400cadf9dfe6ab5733712dcfeccf8022d61c589 | c79a2c17e89fe21d00ccd9c1646a03407cd61839 | refs/heads/master | 2023-06-20T15:47:35.962661 | 2021-07-19T22:12:18 | 2021-07-19T23:45:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
TODO Purpose of the file
@project: HSPyLib
hspylib.demo.calculator.core.enum
@file: calc_operations.py
@created: Tue, 4 May 2021
@author: <B>H</B>ugo <B>S</B>aporetti <B>J</B>unior"
@site: https://github.com/yorevs/hspylib
@license: MIT - Please refer to <https://opensource.org/licenses/MIT>
Copyright 2021, HSPyLib team
"""
from hspylib.core.enums.enumeration import Enumeration
class CalcOperations(Enumeration):
NO_OP = None
DIVISION = '/'
MULTIPLICATION = 'x'
SUBTRACTION = '-'
SUM = '+'
PERCENT = '%'
| [
"yorevs@gmail.com"
] | yorevs@gmail.com |
2bc8b8d2909fc34cf28f01487adfe67227bc5f8f | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Email/Reverse Email Address Lookup/buster/.eggs/twint-2.1.20-py3.9.egg/twint/storage/db.py | f15eb7a49b4a932756426874e2e973dd6935f7c1 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:1bfee100772a62a8b82dc9142325b46126ad5a38e7090ebdd4c72c1a70a5cc92
size 10419
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
4a589351fa2393c773bb74d5a6069d589b1e9813 | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/Test002/其他流程控制工具_20201204100832.py | 731c162d1e5a7392659bf40e9605c24fcfa3c253 | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | #if 语句
# x = int(input("请输入数字:"))
# if x < 0:
# x = 0
# print("Negative changed to zero")
# elif x == 0:
# print("Zero")
# elif x ==1:
# print("Single")
# else:
# print("More")
# for语句
# words = ['cat', 'window', 'defenestrate']
# for i in words:
# print(i,len(i))
# for i in range(5):
# print(i)
# for i in range(5,10):
# print(i)
# for i in range(0,10,2):
# print(i)
# for i in range(-10,-100,-10):
# print(i)
# a = ['Mary', 'had', 'a', 'little', 'lamb']
# for i in range(len(a)):
# print(i,a[i])
# print(range(10))
# for i in range(10):
# print(i)
# a = sum(range(0,11))
# print(a)
# b = list(range(0,11))
# print(b)
# for i in range(0,11):
# print(i)
# for n in range(2,10):
# for x in range(2,n):
# if n % x ==0:
# print(n, 'equals', x, '*', n//x)
# break
# else:
# print(n," is a prime number")
# for i in range(2,10):
# if i % 2 ==0:
# print("even number",i)
# continue
# print("old number",i)
# pass语句
# 定义函数
def sum(n):
a,b = 0,1
while a < n:
print(a,end=" ")
| [
"zhangyingxbba@gmail.com"
] | zhangyingxbba@gmail.com |
431a3d4d65b0f75ede18b9904310843857734faa | a4b81839fe6d7726eb6c9d2c9fd0d9a70cf3ef3f | /Code/LuanqibazaoExercise/20191123/01.py | d20c16f2cd24a38ecd39cdb9d6b9118d2fd98e65 | [] | no_license | Python87-com/PythonExercise | 3f84e0f194254d0f0a8b106348b214ccdeebf842 | 9bef64c6d3b143236bf06131e5f7a5aabcf6980b | refs/heads/master | 2021-01-31T15:56:27.743718 | 2020-01-02T12:10:43 | 2020-01-02T12:10:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | """
属性私有化
"""
class Student:
def __init__(self, name, sex):
self.name = name
self.__sex = sex
def play(self):
print(self.name, "性别是", self.__sex)
# 创建对象
s1 = Student("Python87", "女")
s1.play() # Python87 性别是 女
s1.__sex = "男"
s1.play() # Python87 性别是 女
| [
"10293665@qq.com"
] | 10293665@qq.com |
a35eab73e09286089b343c8b91fc0a7562c32b20 | f8666599b83d34c861651861cc7db5b3c434fc87 | /plotly/validators/scatter3d/marker/colorbar/tickformatstop/_enabled.py | 7738271280f1775097e8652e9c73659f9a88a3a9 | [
"MIT"
] | permissive | mode/plotly.py | 8b66806e88c9f1820d478bab726f0bea81884432 | c5a9ac386a40df2816e6c13264dadf14299401e4 | refs/heads/master | 2022-08-26T00:07:35.376636 | 2018-09-26T19:08:54 | 2018-09-26T19:19:31 | 60,372,968 | 1 | 1 | MIT | 2019-11-13T23:03:22 | 2016-06-03T19:34:55 | Python | UTF-8 | Python | false | false | 514 | py | import _plotly_utils.basevalidators
class EnabledValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name='enabled',
parent_name='scatter3d.marker.colorbar.tickformatstop',
**kwargs
):
super(EnabledValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| [
"noreply@github.com"
] | mode.noreply@github.com |
bdf3194356fbcc1281fbaebb3743619a8406c1c7 | c0da86779f7037d9fa50499c470f8dd91fb11093 | /DjangoProject/报障系统/StandYourGround/StandYourGround/wsgi.py | 490cdd0c653e5384cf689bd9b80cceb3a8ba7787 | [] | no_license | SuoSuo-Rocky/HaiYan_left_Pro | 9c96148a9fe9edc191b2aa1ba1a4caf55184d2e1 | 670aedee8b403141c5e81615dea89d28dfcd9ebb | refs/heads/master | 2023-01-12T07:12:53.602217 | 2019-07-07T06:20:25 | 2019-07-07T06:20:25 | 195,602,247 | 0 | 0 | null | 2023-01-05T20:39:51 | 2019-07-07T02:39:33 | JavaScript | UTF-8 | Python | false | false | 407 | py | """
WSGI config for StandYourGround project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'StandYourGround.settings')
application = get_wsgi_application()
| [
"dt1930dgbomb@aliyun.com"
] | dt1930dgbomb@aliyun.com |
ef1fb7e03ab2eaaddb3e5df941f24e1d597911b9 | 0ddb274058b7cf4dc4152b4167195ed322914795 | /dxfwrite/algebra/base.py | bdf9002723cfebd13914c55d8162a76c3cd6c901 | [
"MIT"
] | permissive | allartburns/dxfwrite | b920843426ebd8cac9c66b84d306655494208d90 | 2679407a4efc797a616ac26898312f0ba1a24041 | refs/heads/master | 2020-05-29T22:43:01.545611 | 2015-04-11T00:32:43 | 2015-04-11T00:32:43 | 33,616,670 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | #!/usr/bin/env python
#coding:utf-8
# Purpose:
# Created: 27.03.2010
__author__ = "mozman <mozman@gmx.at>"
import math
HALF_PI = math.pi / 2.
THREE_PI_HALF = 1.5 * math.pi
DOUBLE_PI = math.pi * 2.
def rotate_2d(point, angle):
""" rotate point around origin point about angle """
x = point[0] * math.cos(angle) - point[1] * math.sin(angle)
y = point[1] * math.cos(angle) + point[0] * math.sin(angle)
return (x, y)
def equals_almost(v1, v2, places=7):
"""compare two float values
places: significant decimal places
"""
return round(v1, places) == round(v2, places)
def normalize_angle(angle):
""" return an angle between 0 and 2*pi """
angle = math.fmod(angle, DOUBLE_PI)
if angle < 0:
angle += DOUBLE_PI
return angle
def is_vertical_angle(angle, places=7):
""" returns True for 1/2pi and 3/2pi """
angle = normalize_angle(angle)
return (equals_almost(angle, HALF_PI, places) or
equals_almost(angle, THREE_PI_HALF, places))
def get_angle(p1, p2):
"""calc angle between the line p1-p2 and the x-axis
input: points as tuples
result: angle in radians
"""
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
return math.atan2(dy, dx)
def right_of_line(point, p1, p2):
""" True if the point self is right of the line p1 -> p2
"""
return not left_of_line(point, p1, p2)
def left_of_line(point, p1, p2):
""" True if the point self is left of the line p1 -> p2
"""
# check if a and b are on the same vertical line
if p1[0] == p2[0]:
# compute # on which site of the line self should be
should_be_left = p1[1] < p2[1]
if should_be_left:
return point[0] < p1[0]
else:
return point[0] > p1[0]
else:
# get pitch of line
pitch = (p2[1] - p1[1]) / (p2[0] - p1[0])
# get y-value at c's x-position
y = pitch * (point[0] - p1[0]) + p1[1]
# compute if point should be above or below the line
should_be_above = p1[0] < p2[0]
if should_be_above :
return point[1] > y
else:
return point[1] < y
| [
"mozman@gmx.at"
] | mozman@gmx.at |
e67646d83b215b610ac774793c91227ab79589ca | 2bcf18252fa9144ece3e824834ac0e117ad0bdf3 | /Kraken/tags/0.2/release_the_kraken.py.example | d4a845fa0a3f49dc07d99e0751d2aa7fb8f4dee9 | [
"Beerware"
] | permissive | chadwhitacre/public | 32f65ba8e35d38c69ed4d0edd333283a239c5e1d | 0c67fd7ec8bce1d8c56c7ff3506f31a99362b502 | refs/heads/master | 2021-05-10T14:32:03.016683 | 2010-05-13T18:24:20 | 2010-05-13T18:24:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | example | #!/usr/local/bin/python
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE":
# <chad@zetaweb.com> wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. --Chad Whitacre
# ----------------------------------------------------------------------------
###### change this value ######
LAIR = '/path/to/Kraken'
# This is here so that we work properly when called from cron. It can't be in
# conf/kraken.conf because we need it to get there! Catch-22! If you know the
# right way to do this please let me know! !!!
###############################
from Kraken import Kraken
k = Kraken(LAIR)
k.release() | [
"chad@zetaweb.com"
] | chad@zetaweb.com |
7635aee8611720059e84cdb29ab3d3f1adff70a0 | 3afe7348e830a0c5139fb7cf393736e18b59ab4a | /src/clusterfuzz/_internal/platforms/linux/lkl/kernel_utils.py | 7a30a1d4229158f8a6c2d24ce8a1809175c497ae | [
"Apache-2.0"
] | permissive | google/clusterfuzz | 00845899e081dbbb89b70a75ce0b7eba3da73b02 | 6501a839b27a264500244f32bace8bee4d5cb9a2 | refs/heads/master | 2023-09-03T17:34:17.821599 | 2023-09-01T16:11:51 | 2023-09-01T16:11:51 | 168,060,021 | 5,420 | 639 | Apache-2.0 | 2023-09-13T16:40:54 | 2019-01-29T00:19:40 | Python | UTF-8 | Python | false | false | 2,768 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linux Kernel Library kernel utils functions."""
import os
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.platforms.android import symbols_downloader
from clusterfuzz._internal.system import environment
from . import constants
def _should_download_symbols():
"""Return True if we should continue to download symbols."""
# For local testing, we do not have access to the cloud storage bucket with
# the symbols. In this case, just bail out.
return not environment.get_value('LOCAL_DEVELOPMENT')
def get_kernel_prefix_and_full_hash(build_id):
"""Download repo.prop and return the full hash and prefix."""
android_kernel_repo_data = _get_repo_prop_data(build_id,
constants.LKL_BUILD_TARGET)
if android_kernel_repo_data:
for line in android_kernel_repo_data.splitlines():
if line.startswith(constants.LKL_REPO_KERNEL_PREFIX):
# line is of form: prefix u'hash'
return (constants.LKL_REPO_KERNEL_PREFIX, line.split(' ',
1)[1].strip('u\''))
return None, None
def _get_repo_prop_data(build_id, fuzz_target):
"""Downloads repo.prop and returuns the data based on build_id and target."""
symbols_directory = os.path.join(
environment.get_value('SYMBOLS_DIR'), fuzz_target)
repro_filename = symbols_downloader.get_repo_prop_archive_filename(
build_id, fuzz_target)
# Grab repo.prop, it is not on the device nor in the build_dir.
_download_kernel_repo_prop_if_needed(symbols_directory, build_id, fuzz_target)
local_repo_path = utils.find_binary_path(symbols_directory, repro_filename)
if local_repo_path and os.path.exists(local_repo_path):
return utils.read_data_from_file(local_repo_path, eval_data=False).decode()
return None
def _download_kernel_repo_prop_if_needed(symbols_directory, build_id,
fuzz_target):
"""Downloads the repo.prop for an LKL fuzzer"""
if not _should_download_symbols():
return
symbols_downloader.download_repo_prop_if_needed(
symbols_directory, build_id, fuzz_target, [fuzz_target], 'lkl_fuzzer')
| [
"noreply@github.com"
] | google.noreply@github.com |
4e25e31332ac40a4a1cd89fc7e30e4ff9596fc27 | 3b6b8223598de9ec75d827945a613f75c3c6f132 | /03-Spider/6_my_ex_se_aio/lagou_se.py | bf6610509f18c30b22e6c8a365df3ec2082f4d61 | [] | no_license | Flavio58it/grocery | 32380a145d59fb5604c44cd1d7cfe50fedb1e0dd | 23e35d4b67208542344985b5e4865b7d5d314be4 | refs/heads/master | 2022-10-01T17:26:26.409840 | 2018-11-20T11:05:31 | 2018-11-20T11:05:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,150 | py |
"""
使用自动化测试工具 selenium 和 BeautifulSoup 抓取 拉钩网的职位信息
"""
import time
import pymongo
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
def get_html(url, keywords):
"""
获取 页面 返回获取的页面列表
:param url: 目标网站 这是是 拉钩网
:param keywords: 搜索的关键字
:return: 获取的页面列表
"""
# 存放 获取的页面的容器, 最后返回此容器
page_html_list = []
chome_options = Options()
chome_options.add_argument('--headless')
chome_options.add_argument('--disable-gpu')
chromeDriver = 'D:/00venv/soft/chromedriver_win32/chromedriver.exe'
# 后台运行
browser = webdriver.Chrome(chromeDriver, chrome_options=chome_options)
# 不是后台运行
# browser = webdriver.Chrome(chromeDriver)
# 后台运行 使用 phantomjs 下载:http://phantomjs.org/download.html
# chromeDriver = r"D:\00venv\soft\phantomjs-2.1.1-windows\bin\phantomjs.exe"
# browser = webdriver.PhantomJS(chromeDriver)
browser.get(url) # 获取页面首页
time.sleep(3)
# 首页 弹框 需要选择城市 这里选择的是成都
try:
browser.find_element_by_xpath('//*[@id="changeCityBox"]/ul/li[7]/a').click()
time.sleep(2)
except:
try:
browser.find_element_by_xpath('//*[@id="filterCollapse"]/div[1]/div[2]/li/div[1]/a[1]').click()
except:
pass
pass
# 全国
# all_in_china = browser.find_element_by_xpath('//*[@id="filterCollapse"]/div[1]/div[2]/li/div[1]/a[1]')
# 切换到 全国进行查找
# all_in_china.click()
# time.sleep(2)
# 其他城市 a[1] - a[13] 更多需要切换页面 暂时就这么多
# 可以通过循环来 获取 这里暂时不写
# city = browser.find_element_by_xpath('//*[@id="filterCollapse"]/div[1]/div[2]/li/div[2]/div/a[4]')
# 进入页面后 搜索的 元素框是不变的, 所有可以放在外面, 只需要在循环中添加关键字就行
search = browser.find_element_by_xpath('//*[@id="search_input"]')
for keyword in keywords:
# 将关键字写入到搜索框中
search.send_keys(keyword)
# 点击搜索
browser.find_element_by_xpath('//*[@id="search_button"]').click()
# 点击事件后 休眠 2 秒 等待页面全部加载出来
time.sleep(2)
# 第一次获取失败后 尝试的 次数, 这里设置的是三次,三次还获取不到,进入下一页
retry_time = 0
# 默认是第一页, 换下一页从 2 开始
page_num = 2
# 设置标志为, 循环终止条件
flag = True
while flag:
# 下一页
try:
next_page = browser.find_element_by_xpath('//*[@id="s_position_list"]/div[2]/div/span[%s]' % str(page_num))
next_page.click()
time.sleep(2)
# 获取页面
page_html = browser.page_source
# 页面添加到列表中
page_html_list.append(page_html)
# 一次获取成功 页码加 1
page_num += 1
# 判断下一页的 下一页 因为最后有 next 这个按钮, 判断 next 后还有没有元素 来终止循环
try:
browser.find_element_by_xpath('//*[@id="s_position_list"]/div[2]/div/span[%s]' % str(page_num + 1))
except:
flag = False
except:
retry_time += 1
print('第 %s 页,第 %s 尝试抓取!' % (page_num, retry_time))
if retry_time > 3:
print('结束获取页面')
page_num += 1
# 关闭浏览器
browser.quit()
return page_html_list
def main():
# 本地
# mongo = pymongo.MongoClient('mongodb://127.0.0.1:27017')
# 阿里云
mongo = pymongo.MongoClient('mongodb://39.104.171.126:10004')
db = mongo.spider
url = 'https://www.lagou.com/'
keywords = ['python']
# keywords = ['python', '爬虫', '大数据']
page_html_list = get_html(url, keywords) # 获取所有的网页信息
for page_html in page_html_list:
page = BeautifulSoup(page_html, 'lxml') # 初始化 bs 对象
company_list = page.find_all('div', {'class', 'list_item_top'}) # 获取每页的公司列表
for company in company_list: # 遍历 获取需要的信息
company_name = company.find("", {'class': "company_name"}).find('a').get_text()
job = company.find('h3').get_text()
salary = company.find('span', {'class': 'money'}).get_text()
# 插入数据库
db.lagou.insert({'公司:': company_name, '职位:': job, '工资:': salary})
print('获取拉钩网数据完毕!')
if __name__ == '__main__':
main()
| [
"1367000465@qq.com"
] | 1367000465@qq.com |
3110e4e698a996e1e82b66366b19e9b17f240a2c | 9da8d60ba0c37a8f5d1f4a7ea8f33f7996b9f1bf | /39.No_Idea!.py | 831a88d5f728e99974a73893bab037b24c3ef517 | [] | no_license | save6/HackerRank | b7e764200e813453fe6037512652f1c1df1fdff3 | da7038b586399e599fdd9e96f7c3b599d928f6a7 | refs/heads/master | 2023-08-25T13:36:05.632435 | 2021-10-27T22:19:43 | 2021-10-27T22:19:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
if __name__=='__main__':
_ = input()
answer = list(input().split())
list_A = set(input().split())
list_B = set(input().split())
happiness = 0
for a in answer:
if a in list_A:
happiness += 1
if a in list_B:
happiness -= 1
print(happiness)
| [
"save6green@gmail.com"
] | save6green@gmail.com |
ed2fbd6f401e303eb8cc4479a60c8bd94a1b8e22 | 377cbbe140fd0faf1eb53ba3794de816ac307cde | /src/experiment/TrainModelExperiment.py | c8b657d6d4c6fbbf74afb4ddf14ee5a51f53cb48 | [
"MIT"
] | permissive | dhruvtapasvi/implementation | fcbd7ab8e7b1368a0f07ee41dc5f0b6d6708c206 | 964980f431517f4548a87172a05107cdf700fb84 | refs/heads/master | 2021-09-16T01:47:50.601661 | 2018-05-17T19:22:44 | 2018-05-17T19:22:44 | 114,498,055 | 1 | 0 | MIT | 2018-05-05T02:17:35 | 2017-12-16T23:59:13 | Python | UTF-8 | Python | false | false | 966 | py | from experiment.Experiment import Experiment
from model.VariationalAutoencoder import VariationalAutoencoder
from dataset.loader.DatasetLoader import DatasetLoader
class TrainModelExperiment(Experiment):
def __init__(self, variationalAutoencoder: VariationalAutoencoder, datasetLoader: DatasetLoader, epochs, batchSize):
self.__variationalAutoencoder = variationalAutoencoder
self.__datasetLoader = datasetLoader
self.__epochs = epochs
self.__batchSize = batchSize
def run(self):
"""
Train the model specified in the constructor with the parameters specified there too
Side effect: the model is trained
:return: The model training history
"""
(xTrain, _), (xValidation, _), _ = self.__datasetLoader.loadData()
modelTrainingHistory = self.__variationalAutoencoder.train(xTrain, xValidation, self.__epochs, self.__batchSize)
return modelTrainingHistory.history
| [
"dhruv.tapasvi1996@gmail.com"
] | dhruv.tapasvi1996@gmail.com |
4cdf1fd5f6441560066c488889e7395009341605 | c364fca8ae4c896dee2c8b0dc545f4d73c8c8314 | /unsupervised_learning/0x01-clustering/8-main_2.py | bca71c105dad636e7f26d8135412abb0afb57834 | [
"MIT"
] | permissive | ledbagholberton/holbertonschool-machine_learning | 7672509d2dc1775bd6708430d244e8f4dd4cb169 | eaf23423ec0f412f103f5931d6610fdd67bcc5be | refs/heads/master | 2020-12-22T01:12:32.824436 | 2020-10-11T12:36:48 | 2020-10-11T12:36:48 | 236,623,497 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | #!/usr/bin/env python3
import numpy as np
EM = __import__('8-EM').expectation_maximization
if __name__ == '__main__':
np.random.seed(11)
a = np.random.multivariate_normal([30, 40], [[75, 5], [5, 75]], size=10000)
b = np.random.multivariate_normal([5, 25], [[16, 10], [10, 16]], size=750)
c = np.random.multivariate_normal([60, 30], [[16, 0], [0, 16]], size=750)
d = np.random.multivariate_normal([20, 70], [[35, 10], [10, 35]], size=1000)
X = np.concatenate((a, b, c, d), axis=0)
np.random.shuffle(X)
k = 4
pi, m, S, g, l = EM(X, k, tol=0., verbose=True)
print(pi)
print(m)
print(S)
print(g)
print(l)
| [
"789@holbertonschool.com"
] | 789@holbertonschool.com |
baba214cbfbc300cbbf3cfac8cea9aa9c1149d96 | 8f205d31e8e5555d69e0a7db086a3c93de6d2806 | /task_scripts/merge_overlaps.py | 7bb49e8383e4cfd29478c39d245d5a50face9f88 | [
"MIT"
] | permissive | torms3/Synaptor | 94e0f04478118399db91d79a8a8b478858fd4138 | 5de74aa61b3d04e88e6bc4c336d543f89d64b9a4 | refs/heads/master | 2021-05-21T19:08:43.625841 | 2020-06-19T23:10:47 | 2020-06-19T23:10:47 | 252,764,824 | 0 | 0 | NOASSERTION | 2020-04-03T15:03:17 | 2020-04-03T15:03:16 | null | UTF-8 | Python | false | false | 384 | py | """
Merge Edges Wrapper Script
Merges overlap matrices together
Writes the segments of max overlap
"""
import synaptor as s
import argparse
parser = argparse.ArgumentParser()
# Inputs & Outputs
parser.add_argument("storagestr")
parser.add_argument("--timing_tag", default=None)
args = parser.parse_args()
print(vars(args))
s.proc.tasks_w_io.merge_overlaps_task(**vars(args))
| [
"nturner.stanford@gmail.com"
] | nturner.stanford@gmail.com |
cd37b9253d5a541d3945acb4e29a7a6cc456b84b | f9a5e7233875989f994438ce267907d8210d60a1 | /test/pump_sensor/metalearning/knn_ranking/RMSE/k=3/extra_trees/sensor_prediction_extraTrees_AUCPRC.py | c51bf8495532222f767c46f192a351e7ebb6c9e6 | [] | no_license | renoslyssiotis/When-are-Machine-learning-models-required-and-when-is-Statistics-enough | da8d53d44a69f4620954a32af3aacca45e1ed641 | 6af1670a74345f509c86b7bdb4aa0761c5b058ff | refs/heads/master | 2022-08-29T20:21:57.553737 | 2020-05-26T18:03:46 | 2020-05-26T18:03:46 | 256,439,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,204 | py | import sys, os, pickle
from pathlib import PurePath
current_dir = os.path.realpath(__file__)
p = PurePath(current_dir)
sys.path.append(str(p.parents[7])+'/metalearners/knn_ranking_method/RMSE')
from KNN_ranking_k_3_RMSE import KNN_ranking
#Load the selected meta-dataset after performing zero-variance threshold
with open(str(p.parents[7])+'/analysis/feature_selection/extra_trees/extra_trees_X_AUCPRC_202.pickle', 'rb') as handle:
metadataset_feature_selected = pickle.load(handle)
#=====================META-FEATURE EXTRACTION==================================
with open(str(p.parents[5])+'/actual/sensor_metafeatures_202.pickle', 'rb') as handle:
meta_features = pickle.load(handle)
#nested_results is a nested dictionary with all the AUC-PRC performances for each dataset and all models
with open(str(p.parents[6])+'/nested_results_prc.pickle', 'rb') as handle:
nested_results_prc = pickle.load(handle)
"""
Remove the meta-features which are not in the meta-dataset
(i.e. the features which have not been selected in the feature selection process)
"""
metafeatures_to_be_removed = []
for metafeature in meta_features.keys():
if metafeature in metadataset_feature_selected.columns:
pass
else:
metafeatures_to_be_removed.append(metafeature)
[meta_features.pop(key) for key in metafeatures_to_be_removed]
#========================META-LEARNING: RANKING================================
#KNN Ranking Method
top1, top2, top3 = KNN_ranking(metadataset_feature_selected, meta_features, nested_results_prc)
print("==========================================")
print(" AUC-PRC ")
print("==========================================")
print("Top 1 predicted model: " + top1)
print("Top 2 predicted model: " + top2)
print("Top 3 predicted model: " + top3)
#Actual results
with open(str(p.parents[5])+'/actual/sensor_top_3_prc.pickle', 'rb') as handle:
actual_results = pickle.load(handle)
print("==========================================")
print("Top 1 ACTUAL model: " + actual_results[0])
print("Top 2 ACTUAL model: " + actual_results[1])
print("Top 3 ACTUAL model: " + actual_results[2])
| [
"rl554@cam.ac.uk"
] | rl554@cam.ac.uk |
3df71c5457a9604d5a8d822930171a8250ec8280 | 66b1f3c3e57f53e1404d6e17c4acc850173a531d | /Python/Django/Calculator/calc_app/calc_app/settings.py | 015d84b659a6d1c67ad8a34c0caeac9554a8f063 | [] | no_license | bMedarski/SoftUni | ca4d6891b3bbe7b03aad5960d2f4af5479fd8bbd | 62cd9cb84b0826e3381c991882a4cdc27d94f8ab | refs/heads/master | 2021-06-08T17:32:39.282975 | 2020-02-04T11:57:08 | 2020-02-04T11:57:08 | 67,947,148 | 6 | 3 | null | 2021-05-06T20:35:42 | 2016-09-11T18:31:02 | Python | UTF-8 | Python | false | false | 3,094 | py | """
Django settings for calc_app project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '34!cf75&$en$qwof)oy(^620hq4m)_j2s77u%h!7y*snuac7b#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'calc_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'calc_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"boyamedarski@mail.bg"
] | boyamedarski@mail.bg |
06a1b2a6764dcc999c4dc021994100e23d2a2b93 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/duplicate_20200619182445.py | 9dae68d8081d41f90f083abc2f6f88532d71fbe0 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | def duplicate(str):
# brute force approach
# is storing the substring and checking how many times they
# occur
# dictionary key is the substring and occurrences is how many
# times its occurred
words ={}
# pointers
left = 0
right = 1
new = []
while right <= len(str) and left < len(str) -2:
if right == len(str):
left +=1
else:
right +=1
words[str[left:right]] = 0
for i in words:
words[i] = str.count(i)
i = 0
for i in words:
if words[i] >=2:
new.append(i)
duplicate("geeksforgeeks") | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
4981bd2b9338bf1e9c7f1e12ac1b0cbbb93b7c11 | 7beff965d7b0e6155d6d52b27d71c557421d5ada | /abc160/e/a.py | d39046088669528af74eef2618f8b38a62258db5 | [] | no_license | uk-ar/competitive_programming | 82a53a1007798843ac006b9c7d313826e6cb45c3 | d2523cf303f47644cada3b03e9eed2349bdbe394 | refs/heads/master | 2023-03-28T13:20:07.728861 | 2021-03-30T20:25:55 | 2021-03-30T20:25:55 | 249,638,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | #!/usr/bin/env pypy3
import sys
sys.setrecursionlimit(15000)
x,y,a,b,c = map(int,input().split())
p = list(sorted(map(int,input().split()),reverse=True))[:x] # x
q = list(sorted(map(int,input().split()),reverse=True))[:y] # y
r = list(sorted(map(int,input().split()),reverse=True))
print(sum(sorted(p+q+r,reverse=True)[:x+y]))
| [
"yuuki.ari@gmail.com"
] | yuuki.ari@gmail.com |
4976f0269c542b42bd84c779b123bc15f165d539 | 92e3a6424326bf0b83e4823c3abc2c9d1190cf5e | /scripts/icehouse/opt/stack/tempest/tempest/api/compute/volumes/test_volumes_get.py | 4f77fa7dca18d39522c591d9e676dac128abe8d6 | [
"Apache-2.0"
] | permissive | AnthonyEzeigbo/OpenStackInAction | d6c21cf972ce2b1f58a93a29973534ded965d1ea | ff28cc4ee3c1a8d3bbe477d9d6104d2c6e71bf2e | refs/heads/master | 2023-07-28T05:38:06.120723 | 2020-07-25T15:19:21 | 2020-07-25T15:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,143 | py | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import matchers
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesGetTestJSON(base.BaseV2ComputeTest):
@classmethod
def resource_setup(cls):
super(VolumesGetTestJSON, cls).resource_setup()
cls.client = cls.volumes_extensions_client
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@test.attr(type='smoke')
def test_volume_create_get_delete(self):
# CREATE, GET, DELETE Volume
volume = None
v_name = data_utils.rand_name('Volume-%s-') % self._interface
metadata = {'Type': 'work'}
# Create volume
resp, volume = self.client.create_volume(size=1,
display_name=v_name,
metadata=metadata)
self.addCleanup(self.delete_volume, volume['id'])
self.assertEqual(200, resp.status)
self.assertIn('id', volume)
self.assertIn('displayName', volume)
self.assertEqual(volume['displayName'], v_name,
"The created volume name is not equal "
"to the requested name")
self.assertTrue(volume['id'] is not None,
"Field volume id is empty or not found.")
# Wait for Volume status to become ACTIVE
self.client.wait_for_volume_status(volume['id'], 'available')
# GET Volume
resp, fetched_volume = self.client.get_volume(volume['id'])
self.assertEqual(200, resp.status)
# Verification of details of fetched Volume
self.assertEqual(v_name,
fetched_volume['displayName'],
'The fetched Volume is different '
'from the created Volume')
self.assertEqual(volume['id'],
fetched_volume['id'],
'The fetched Volume is different '
'from the created Volume')
self.assertThat(fetched_volume['metadata'].items(),
matchers.ContainsAll(metadata.items()),
'The fetched Volume metadata misses data '
'from the created Volume')
class VolumesGetTestXML(VolumesGetTestJSON):
_interface = "xml"
| [
"cody@uky.edu"
] | cody@uky.edu |
7715d5679012483083ebf58aa1308f7a584f95d5 | fbfe37305712d49c84d87b2bb3ef88d3cf68cf69 | /apps/post/migrations/0001_initial.py | 1409752baa7f263d6454d8f513746baf8672b72a | [] | no_license | xal9wiii4ik/social_network | 50ba7788bbd4b53c77f69f61c5790c3a4a13b6e4 | 23d6c72bf4ed0e182570d0e55e973f22701850cd | refs/heads/master | 2023-03-30T22:03:28.379419 | 2021-04-08T21:55:56 | 2021-04-08T21:55:56 | 325,549,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | # Generated by Django 3.1.5 on 2021-01-27 13:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``post`` app: ``Subject`` and ``Post`` models."""

    initial = True

    dependencies = [
        # Post.owner references the user model, so it must exist first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Subject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=99, unique=True, verbose_name='Тема')),
            ],
            options={
                'verbose_name': 'Тема',
                'verbose_name_plural': 'Темы',
            },
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30, verbose_name='Заглавие')),
                ('body', models.TextField(max_length=1024, verbose_name='Тело поста')),
                ('published_date', models.DateTimeField(auto_now_add=True, verbose_name='Дата публикации')),
                # SET_NULL keeps a post alive when its author account is deleted.
                ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='post_owner', to=settings.AUTH_USER_MODEL)),
                # Deleting a subject cascades to all of its posts.
                ('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_subject', to='post.subject')),
            ],
            options={
                'verbose_name': 'Пост',
                'verbose_name_plural': 'Посты',
            },
        ),
    ]
| [
"xal9wIII4ik@yandex.ru"
] | xal9wIII4ik@yandex.ru |
ef6aef324a83544b3a9424e5c5c1b975b4f19d4d | 0e04214a06ef5b220f9179bd7b7a0792ea17145b | /genqr.py | 2f79d2caf4616f5d43d85dd3432ef354d949bf00 | [
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-gutenberg-2020"
] | permissive | tebeka/pythonwise | cbeb63d4f4b2ec8d1d0df392f721c1557d9b00f4 | e3e1cdca77224c0ff2cf1fba69d20c997224decc | refs/heads/master | 2022-11-19T03:59:17.736977 | 2022-11-08T07:22:20 | 2022-11-08T07:22:20 | 45,970,082 | 23 | 8 | BSD-3-Clause | 2021-04-21T04:54:40 | 2015-11-11T08:46:50 | Python | UTF-8 | Python | false | false | 1,580 | py | #!/usr/bin/env python
'''Generate QR code using Google Charts API'''
import sys
# Python 3/2 compatibility
if sys.version_info[:2] < (3, 0):
from urllib import urlopen, urlencode
import httplib
stdout = sys.stdout
else:
from urllib.request import urlopen
from urllib.parse import urlencode
import http.client as httplib
stdout = sys.stdout.buffer
def gen_qr(data, size):
    """Return PNG image bytes for a QR code encoding *data*.

    data: the text to encode in the QR code.
    size: chart dimensions as '<width>x<height>', e.g. '200x200'.
    Raises ValueError if Google Charts replies with a non-200 status.
    """
    charts_url = 'https://chart.googleapis.com/chart'
    params = [
        ('cht', 'qr'),
        ('chs', size),
        ('chl', data),
    ]
    query = urlencode(params)
    url = '%s?%s' % (charts_url, query)
    fo = urlopen(url)
    # Fix: the response was never closed, leaking the HTTP connection.
    # try/finally (rather than a `with` block) keeps Python 2 support,
    # where the urlopen result is not a context manager.
    try:
        if fo.code != httplib.OK:
            raise ValueError('bad reply from Google %d' % fo.code)
        return fo.read()
    finally:
        fo.close()
if __name__ == '__main__':
    from argparse import ArgumentParser

    parser = ArgumentParser(
        description='Generate QR using Google Charts (PNG)')
    parser.add_argument('data', help='data to encode')
    # Fix: the help text said "(stdin)" for an *output* option;
    # '-' (the default) writes the PNG to standard output.
    parser.add_argument('--out', '-o', help='output file name (stdout)',
                        default='-')
    parser.add_argument('--size', '-s', help='image size (200x200)',
                        default='200x200')
    args = parser.parse_args()

    try:
        img_data = gen_qr(args.data, args.size)
        out = stdout if args.out == '-' else open(args.out, 'wb')
        try:
            out.write(img_data)
        finally:
            # Close real files; the process-wide stdout stays open.
            if out is not stdout:
                out.close()
    except ValueError as err:
        raise SystemExit('error: {}'.format(err))
    except IOError as err:
        raise SystemExit(
            'error: cannot open {} for writing - {}'.format(args.out, err))
"miki.tebeka@gmail.com"
] | miki.tebeka@gmail.com |
afccf24a53afcb8a7a26b451ffabd6f8218d208a | 07b37ca45d38edea112895049acf76d96ff07eff | /3.Processing&UnderstadingText/stemming.py | 7493e6263eb23c0111bcfc4069bbdb0ababcca3f | [] | no_license | KRBhavaniSankar/NLTK | e335944de346be72a01c92221b0bf58d85475fb9 | 4b228338566996fbccee72cb6afaa199a6496787 | refs/heads/master | 2020-03-12T23:03:59.981112 | 2018-05-11T01:15:28 | 2018-05-11T01:15:28 | 130,858,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | # -*- coding: utf-8 -*-
"""
Created on Fri APR 29 10:04 AM
@author: Bhavani
"""
"""
The NLTK package has several implementation for stemmers, These stemmers are implemented in the stem module , which inherits the stemmer interface in the nltk.stem.api module.
One of the most popular stemmers is the PorterStemmer. There also exists poter2 algorithm which is imporvements of original stemmig algorithm.
"""
#Porterstemmer
import collections
from nltk.stem import PorterStemmer
ps = PorterStemmer()
words_list = ["jumping","jumps","jumped","jump"]
for w in words_list:
print(ps.stem(w))
#print(ps.stem("lying"))
#print(ps.stem("strange"))
from nltk.stem import LancasterStemmer
ls = LancasterStemmer()
for w in words_list:
print(ls.stem(w))
stem_word_list = [ls.stem(w) for w in words_list]
print(stem_word_list.count('jump'))
print(stem_word_list)
print(ls.stem("lying"))
print(ls.stem("strange"))
"""
There are several other stemmers, including RegexpStemmer , where you can build
your own stemmer based on user-defined rules , and SnowballStemmer , which supports
stemming in 13 different languages besides English.
"""
#Regex Based stemmer
from nltk.stem import RegexpStemmer
rs = RegexpStemmer("ing$|s$|ed$",min=4)
for w in words_list:
print(rs.stem(w))
print(rs.stem("lying"))
print(rs.stem("strange"))
#Snow Ball stemmer
from nltk.stem import SnowballStemmer
ss = SnowballStemmer("german")
print("supported languages are :",SnowballStemmer.languages)
german_cars = "autobahnen"
print(ss.stem(german_cars)) | [
"krbhavanisankar@gmail.com"
] | krbhavanisankar@gmail.com |
dbe354eca1ad06148fab7434a384fd6262c32ff8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03845/s040519010.py | af55c2d9fab44bd279e0ea967a34ce3e0dfda0cd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | import sys, os, math, bisect, itertools, collections, heapq, queue, copy, array
# from scipy.sparse.csgraph import csgraph_from_dense, floyd_warshall
# from decimal import Decimal
# from collections import defaultdict, deque
sys.setrecursionlimit(10000000)
ii = lambda: int(sys.stdin.buffer.readline().rstrip())
il = lambda: list(map(int, sys.stdin.buffer.readline().split()))
fl = lambda: list(map(float, sys.stdin.buffer.readline().split()))
iln = lambda n: [int(sys.stdin.buffer.readline().rstrip()) for _ in range(n)]
iss = lambda: sys.stdin.buffer.readline().decode().rstrip()
sl = lambda: list(map(str, sys.stdin.buffer.readline().decode().split()))
isn = lambda n: [sys.stdin.buffer.readline().decode().rstrip() for _ in range(n)]
lcm = lambda x, y: (x * y) // math.gcd(x, y)
MOD = 10 ** 9 + 7
INF = float('inf')
def main():
if os.getenv("LOCAL"):
sys.stdin = open("input.txt", "r")
N = ii()
T = il()
M = ii()
sm = sum(T)
for m in range(M):
P, X = il()
print(sm-(T[P-1]-X))
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9ed21cd4df7efe6eaadf95f108b7c5e71d1d8a07 | c7d0f590f3353b827ed34b731c2f6c53952a1f65 | /autocv/researcher.py | 7d8ad3376953e7b13829f6fe7f6647f44eb5fdb2 | [
"MIT"
] | permissive | Daniel-Gong/autoCV | 811285a15c913776ced6c2ca49e8b4c625514399 | ea620c88f46900bc177eb06775f001696c77a09d | refs/heads/master | 2023-06-26T11:53:17.831465 | 2021-08-05T17:16:01 | 2021-08-05T17:16:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,008 | py | """
class for a researcher
"""
import os
import json
import requests
import scholarly
import pypatent
from .orcid import get_dois_from_orcid_record
from .pubmed import get_pubmed_data
from .publication import JournalArticle, Book, BookChapter
from .crossref import get_crossref_records, parse_crossref_record
from .utils import get_additional_pubs_from_csv, CustomJSONEncoder, get_random_hash, drop_excluded_pubs
class Researcher:
    """Aggregates a researcher's publication record from multiple sources.

    Data is pulled from ORCID, PubMed, Crossref, Google Scholar, the USPTO
    (via pypatent), and a local additional_pubs.csv, then merged into the
    ``publications`` dict and serialized to/from JSON.
    """

    def __init__(self, param_file='params.json', basedir=None):
        """Load parameters from *param_file*; *basedir* defaults to its directory."""
        self.param_file = param_file
        self.load_params(param_file)
        self.basedir = os.path.dirname(param_file) if basedir is None else basedir
        self.orcid_data = None
        self.orcid_dois = None
        self.pubmed_data = None
        self.crossref_data = None
        self.gscholar_data = None
        self.patent_data = None
        self.serialized = None
        self.publications = None
        self.rendered_latex = None

    def load_params(self, param_file):
        """Read the JSON params file and set each key as an instance attribute.

        Raises FileNotFoundError with setup instructions if it is missing.
        """
        if os.path.exists(param_file):
            with open(param_file) as f:
                params = json.load(f)
        else:
            raise FileNotFoundError("""Please create a json file called params.json
            containing the fields email (with your email address), orcid (with your ORCID id)
            and query (with your pubmed query)- see documentation for help')
            """)
        # Every top-level key becomes an attribute (e.g. email, orcid, query).
        for field in params:
            setattr(self, field, params[field])

    def get_orcid_data(self, timeout=60):
        """Fetch the full ORCID record (JSON) into self.orcid_data."""
        orcid_url = "https://pub.orcid.org/v3.0/%s" % self.orcid
        print('using ORCID URL:', orcid_url)
        resp = requests.get(orcid_url,
                            headers={'Accept': 'application/vnd.orcid+json'},
                            timeout=timeout)
        self.orcid_data = resp.json()

    def get_orcid_dois(self):
        """Extract DOIs from the ORCID record, fetching it first if needed."""
        if self.orcid_data is None:
            self.get_orcid_data()
        self.orcid_dois = get_dois_from_orcid_record(self.orcid_data)

    def get_pubmed_data(self):
        """Run the configured PubMed query and store the raw results."""
        # Note: the method shadows the module-level helper of the same
        # name; the call below resolves to the imported function.
        self.pubmed_data = get_pubmed_data(self.query, self.email)
        print('retrieved %d full pubmed records' % len(self.pubmed_data['PubmedArticle']))

    def get_google_scholar_record(self):
        """Look up the author on Google Scholar and store the filled profile."""
        search_query = scholarly.scholarly.search_author(
            ' '.join([self.firstname, self.lastname]))
        # Takes the first search hit; assumes it is the right author —
        # TODO confirm for common names.
        query_resp = next(search_query)
        self.gscholar_data = scholarly.scholarly.fill(query_resp)

    def make_publication_records(self, use_exclusions=True):
        """Build self.publications from PubMed, ORCID/Crossref and the CSV.

        PubMed records take precedence; Crossref records for DOIs already
        seen via PubMed are skipped. If *use_exclusions* is True, entries
        matching the exclusion list are dropped before the CSV is merged.
        """
        # test pubmed
        self.get_pubmed_data()
        pubmed_dois = []
        self.publications = {}
        for r in self.pubmed_data['PubmedArticle']:
            pub = JournalArticle()
            pub.from_pubmed(r)
            pub.format_reference_latex()
            pub.hash = pub.get_pub_hash()
            self.publications[pub.DOI] = pub
            # keep track of pubmed DOIs so that we
            # don't overwrite with crossref
            pubmed_dois.append(pub.DOI)

        if self.orcid_data is None:
            self.get_orcid_data()
        if self.orcid_dois is None:
            self.get_orcid_dois()
        print('found %d ORCID dois' % len(self.orcid_dois))

        # load orcid pubs using crossref
        self.crossref_data = get_crossref_records(self.orcid_dois)
        print('found %d crossref records' % len(self.crossref_data))

        for c in self.crossref_data:
            d = parse_crossref_record(self.crossref_data[c])
            if d is not None:
                # skip existing pubmed records and preprints
                if d['DOI'] in pubmed_dois:
                    continue
                # Instantiate the publication subclass matching the
                # Crossref "type" field; unknown types are skipped.
                if d['type'] in ['journal-article', 'proceedings-article']:
                    p = JournalArticle()
                elif d['type'] in ['book', 'monograph']:
                    p = Book()
                elif d['type'] == 'book-chapter':
                    p = BookChapter()
                else:
                    continue
                p.from_dict(d)
                # Key by DOI, else ISBN, else a random hash.
                if hasattr(p, 'DOI'):
                    id = p.DOI
                elif hasattr(p, 'ISBN'):
                    id = p.ISBN
                else:
                    id = get_random_hash()
                self.publications[id] = p
        if use_exclusions:
            self.publications = drop_excluded_pubs(self.publications)
        print('found %d additional pubs from ORCID via crossref' % (len(self.publications) - len(pubmed_dois)))

        # Finally merge manually curated entries from additional_pubs.csv.
        additional_pubs_file = os.path.join(
            self.basedir, 'additional_pubs.csv'
        )
        additional_pubs = get_additional_pubs_from_csv(additional_pubs_file)
        for pub in additional_pubs:
            if additional_pubs[pub]['type'] in ['journal-article', 'proceedings-article']:
                self.publications[pub] = JournalArticle()
            elif additional_pubs[pub]['type'] in ['book', 'monograph']:
                self.publications[pub] = Book()
            elif additional_pubs[pub]['type'] == 'book-chapter':
                self.publications[pub] = BookChapter()
            else:
                print('skipping unknown type', additional_pubs[pub]['type'])
                continue
            self.publications[pub].from_dict(additional_pubs[pub])

    def get_patents(self):
        """Search USPTO by last name, keeping hits whose inventor matches
        the researcher's first and last name (case-insensitive)."""
        results = pypatent.Search(self.lastname).as_list()
        self.patent_data = []
        for r in results:
            for i in r['inventors']:
                # Presumably i is (first-name string, last-name string) —
                # TODO confirm against the pypatent result format.
                fn = i[0].split(' ')[0].lower()
                ln = i[1].lower()
                if fn == self.firstname.lower() and ln == self.lastname.lower():
                    self.patent_data.append(r)

    def from_json(self, filename):
        """Restore previously serialized state from *filename*.

        Publications are rebuilt as typed objects; every other known
        attribute is assigned directly.
        """
        with open(filename, 'r') as f:
            serialized = json.load(f)

        for k in serialized.keys():
            if hasattr(self, k):
                print('ingesting', k)
                if k == 'publications':
                    self.publications = {}
                    for pub in serialized[k]:
                        if serialized[k][pub]['type'] in ['journal-article', 'proceedings-article']:
                            self.publications[pub] = JournalArticle()
                        elif serialized[k][pub]['type'] in ['book', 'monograph']:
                            self.publications[pub] = Book()
                        elif serialized[k][pub]['type'] == 'book-chapter':
                            self.publications[pub] = BookChapter()
                        else:
                            print('skipping unknown type', serialized[k][pub]['type'])
                            continue
                        self.publications[pub].from_dict(serialized[k][pub])
                else:
                    setattr(self, k, serialized[k])

    def serialize(self):
        """Build self.serialized (a JSON-safe dict) from the current state.

        Only the Google Scholar h-index and non-empty publications are kept.
        """
        self.serialized = {}
        self_dict = self.__dict__.copy()
        # NOTE(review): 'gscholar_data' is always present after __init__;
        # the hindex lookup assumes get_google_scholar_record() has run —
        # TODO confirm this method is only called after that.
        if 'gscholar_data' in self_dict:
            self.serialized['gscholar_data'] = {
                'hindex': self_dict['gscholar_data']['hindex']}

        self.serialized['publications'] = {}
        for k, pubinfo_orig in self.publications.items():
            pubinfo = pubinfo_orig.to_json()
            # Skip publications that serialize to an empty dict.
            if len(pubinfo) == 0:
                print('skipping', k)
                continue
            else:
                print('keeping', k)
            # fields_to_drop = []
            # for kk, subfields in pubinfo.items():
            #     try:
            #         _ = json.dumps(subfields)
            #     except:
            #         fields_to_drop.append(kk)
            # for f in fields_to_drop:
            #     del pubinfo[f]
            self.serialized['publications'][k] = pubinfo  # .to_json()

    def to_json(self, filename):
        """Serialize (if needed) and write the state to *filename* as JSON."""
        if self.serialized is None:
            self.serialize()
        with open(filename, 'w') as f:
            json.dump(self.serialized, f, cls=CustomJSONEncoder,
                      indent=4)
| [
"poldrack@gmail.com"
] | poldrack@gmail.com |
5a0afac41b222f0e907588c7a0cd449a2394cad6 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AntMerchantExpandMerchantTypeQueryModel.py | 73349409d748e7a00a24a09e560029204227e6f5 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 890 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AntMerchantExpandMerchantTypeQueryModel(object):
    """Request model for the merchant-type query API.

    Carries a single field, ``user_id``, and converts to/from the plain
    dict form used by the Alipay SDK transport layer.
    """

    def __init__(self):
        self._user_id = None

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        # Serialize to a plain dict; a falsy user_id is simply omitted.
        params = {}
        if self.user_id:
            value = self.user_id
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params['user_id'] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        # Build an instance from a response dict; empty/None input -> None.
        if not d:
            return None
        model = AntMerchantExpandMerchantTypeQueryModel()
        if 'user_id' in d:
            model.user_id = d['user_id']
        return model
| [
"ben.zy@antfin.com"
] | ben.zy@antfin.com |
6ac230540f541b35846d2ee4981281291f47efc3 | 987697512ce9b8d7c29bfd2f18d5aec0261a6863 | /二叉树的层次遍历II.py | b6a34e636804c1c42d8861937870a5d1564c2981 | [] | no_license | Luckyaxah/leetcode-python | 65e7ff59d6f19312defdc4d4b4103c39193b198a | 2b9c78ba88e7bf74a46a287fb1914b4d6ba9af38 | refs/heads/master | 2023-06-05T12:15:31.618879 | 2021-06-22T13:05:30 | 2021-06-22T13:05:30 | 262,287,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | from 二叉树类 import TreeNode
class Solution:
def levelOrderBottom(self, root: TreeNode) :
if not root:
return [[]]
ret = []
def fun(root,level):
if not root:
return
if len(ret)<level+1:
ret.append([root.val])
else:
ret[level].append(root.val)
fun(root.left,level+1)
fun(root.right,level+1)
fun(root,0)
return list(reversed(ret))
if __name__ == "__main__":
a = Solution()
t = TreeNode([3,9,20,None,None,15,7])
print(a.levelOrderBottom(t)) | [
"math_leqi@163.com"
] | math_leqi@163.com |
e184a01c1505720f9c09cf93c097dc09449403fa | f707303e4dfe383cf82c23a6bb42ccfdc4cfdb67 | /pandas-ml-utils/pandas_ml_utils_test/ml/model/test_skmodel_accuracy.py | 14b0da71b9536c9f63f59bff4a5256abf2c77f00 | [
"MIT"
] | permissive | jcoffi/pandas-ml-quant | 1830ec256f8c09c04f1aa77e2eecfba07d34fe68 | 650a8e8f77bc4d71136518d1c7ee65c194a99cf0 | refs/heads/master | 2023-08-31T06:45:38.060737 | 2021-09-09T04:44:35 | 2021-09-09T04:44:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,073 | py | from pandas_ml_utils import FittingParameter
from pandas_ml_utils_test.ml.model.test_model_accuracy import TestModelAccuracy
class TestSkModelAccuracy(TestModelAccuracy):
    """Accuracy tests for SkModel-wrapped scikit-learn estimators.

    Supplies (model, FittingParameter) pairs to the shared test cases
    inherited from TestModelAccuracy; each tuple's ``context`` string
    labels the configuration in test output.
    """

    def provide_linear_regression_model(self):
        """Models for the linear-regression accuracy test."""
        from sklearn.linear_model import LinearRegression
        from sklearn.neural_network import MLPRegressor
        from pandas_ml_utils import FeaturesAndLabels, SkModel

        return [
            (
                SkModel(LinearRegression(), FeaturesAndLabels(["x"], ["y"])),
                FittingParameter(epochs=1, fold_epochs=1, context="LinearRegression")
            ),
            (
                SkModel(
                    MLPRegressor(10, learning_rate_init=0.01, max_iter=9000, validation_fraction=0),
                    FeaturesAndLabels(["x"], ["y"])
                ),
                FittingParameter(epochs=1, fold_epochs=1, context="MLPRegressor")
            ),
            (
                # warm_start=True with max_iter=1 lets the outer epoch loop
                # drive training one iteration at a time (partial fit).
                SkModel(
                    MLPRegressor(10, learning_rate_init=0.01, max_iter=1, validation_fraction=0, warm_start=True),
                    FeaturesAndLabels(["x"], ["y"])
                ),
                FittingParameter(epochs=9000, fold_epochs=1, context="MLPRegressor partial fit")
            )
        ]

    def provide_non_linear_regression_model(self):
        """Models for the non-linear-regression accuracy test."""
        from sklearn.neural_network import MLPRegressor
        from pandas_ml_utils import FeaturesAndLabels, SkModel

        return [
            (
                SkModel(
                    MLPRegressor(200, learning_rate_init=0.001, max_iter=5000, validation_fraction=0),
                    FeaturesAndLabels(["x"], ["y"])
                ),
                FittingParameter(epochs=1, context="epoch 1 fit"),
            ),
            (
                SkModel(
                    MLPRegressor(200, learning_rate_init=0.001, max_iter=1, validation_fraction=0, warm_start=True),
                    FeaturesAndLabels(["x"], ["y"])
                ),
                FittingParameter(epochs=5000, context="partial fit"),
            )
        ]
| [
"kic@kic.kic"
] | kic@kic.kic |
7aa7cb3ed8f101f228aa9076e18398a6f5937a6e | b1ddd313527e84ace13729c7f0ad6953f254d0f1 | /tester.py | d2cb0273fb26c0047f5e0d3b91d0945a072b4bd6 | [] | no_license | sander76/weasy-server | bc4e4d98aedab52037e831fed55993d1be27db8c | f8196d382ca7abba4156d6f62a0371b9b2ad05f2 | refs/heads/master | 2020-06-28T22:57:23.783768 | 2016-12-02T10:05:47 | 2016-12-02T10:05:47 | 74,461,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from weasyprint import HTML
import logging
logging.basicConfig(level=logging.DEBUG)

# Stress test: render the same guide page to PDF up to 300 times,
# stopping at the first OSError (observed intermittently in WeasyPrint).
URL = "http://tools.hde.nl/menc/site/guides/Pliss%C3%A9%20%26%20Duette%C2%AE%20Bottom-Up%20programming/"
OUT = "/home/admin-s/test.pdf"

for lp in range(0, 300):
    try:
        HTML(URL).write_pdf(OUT)
    except OSError as e:
        # Log the full traceback with the attempt number, then stop.
        logging.exception("**************** ERROR AT ATTEMPT: {} *******************".format(lp))
        break
| [
"s.teunissen@gmail.com"
] | s.teunissen@gmail.com |
4ad300b3af57d974576a31de15ca71fa38cfe7c8 | 8d35b8aa63f3cae4e885e3c081f41235d2a8f61f | /discord/ext/dl/extractor/cnbc.py | 90c89123e5bbed7130c3977526254dd0beb26afc | [
"MIT"
] | permissive | alexyy802/Texus | 1255f4e54c8d3cc067f0d30daff1cf24932ea0c9 | c282a836f43dfd588d89d5c13f432896aebb540f | refs/heads/master | 2023-09-05T06:14:36.217601 | 2021-11-21T03:39:55 | 2021-11-21T03:39:55 | 429,390,575 | 0 | 0 | MIT | 2021-11-19T09:22:22 | 2021-11-18T10:43:11 | Python | UTF-8 | Python | false | false | 2,370 | py | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import smuggle_url
# Extractor for video.cnbc.com gallery URLs. Extraction is delegated to
# the generic ThePlatform extractor via a smuggled SMIL URL.
class CNBCIE(InfoExtractor):
    _VALID_URL = r"https?://video\.cnbc\.com/gallery/\?video=(?P<id>[0-9]+)"
    _TEST = {
        "url": "http://video.cnbc.com/gallery/?video=3000503714",
        "info_dict": {
            "id": "3000503714",
            "ext": "mp4",
            "title": "Fighting zombies is big business",
            "description": "md5:0c100d8e1a7947bd2feec9a5550e519e",
            "timestamp": 1459332000,
            "upload_date": "20160330",
            "uploader": "NBCU-CNBC",
        },
        "params": {
            # m3u8 download
            "skip_download": True,
        },
    }

    def _real_extract(self, url):
        # Hand off to ThePlatform; smuggle_url forces SMIL handling there.
        video_id = self._match_id(url)
        return {
            "_type": "url_transparent",
            "ie_key": "ThePlatform",
            "url": smuggle_url(
                "http://link.theplatform.com/s/gZWlPC/media/guid/2408950221/%s?mbr=true&manifest=m3u"
                % video_id,
                {"force_smil_url": True},
            ),
            "id": video_id,
        }
# Extractor for www.cnbc.com article-style video pages. Resolves the page
# path to a numeric video id via CNBC's GraphQL endpoint, then delegates
# to CNBCIE above.
class CNBCVideoIE(InfoExtractor):
    _VALID_URL = r"https?://(?:www\.)?cnbc\.com(?P<path>/video/(?:[^/]+/)+(?P<id>[^./?#&]+)\.html)"
    _TEST = {
        "url": "https://www.cnbc.com/video/2018/07/19/trump-i-dont-necessarily-agree-with-raising-rates.html",
        "info_dict": {
            "id": "7000031301",
            "ext": "mp4",
            "title": "Trump: I don't necessarily agree with raising rates",
            "description": "md5:878d8f0b4ebb5bb1dda3514b91b49de3",
            "timestamp": 1531958400,
            "upload_date": "20180719",
            "uploader": "NBCU-CNBC",
        },
        "params": {
            "skip_download": True,
        },
    }

    def _real_extract(self, url):
        path, display_id = re.match(self._VALID_URL, url).groups()
        # Query the GraphQL API for the page's vcpsId (the numeric id
        # understood by the video.cnbc.com gallery endpoint).
        video_id = self._download_json(
            "https://webql-redesign.cnbcfm.com/graphql",
            display_id,
            query={
                "query": """{
  page(path: "%s") {
    vcpsId
  }
}"""
                % path,
            },
        )["data"]["page"]["vcpsId"]
        return self.url_result(
            "http://video.cnbc.com/gallery/?video=%d" % video_id, CNBCIE.ie_key()
        )
| [
"noreply@github.com"
] | alexyy802.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.