Dataset schema (one row per source file):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3 to 288
- content_id: string, length 40
- detected_licenses: list, length 0 to 112
- license_type: string, 2 classes
- repo_name: string, length 5 to 115
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 684 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
- gha_language: string, 147 classes
- src_encoding: string, 25 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 128 to 12.7k
- extension: string, 142 classes
- content: string, length 128 to 8.19k
- authors: list, length 1
- author_id: string, length 1 to 132
---
blob_id: cfdc5c14013630e610a586b320bf708cda421dd0
directory_id: 871107e8ac9e7631057c9c9b02d3fd733e00fe2c
path: /Client/screens/introscreen.py
content_id: 602202fdfd03760e173b0f4593127a50fce4d43b
detected_licenses: []
license_type: no_license
repo_name: jamesfowkes/Snackspace
snapshot_id: 40993cfc26b23463939ebaa8fa37f666086be3c9
revision_id: d93633aaf25039b9a548b073e69d8f23c94b71e3
branch_name: refs/heads/master
visit_date: 2018-12-30T10:56:48.786485
revision_date: 2013-10-24T20:49:04
committer_date: 2013-10-24T20:49:04
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,157
extension: py
content:
"""
introscreen.py
The first screen to be displayed when snackspace starts.
"""
from .displayconstants import Colours, Screens
from .screen import Screen
from .introscreen_gui import IntroScreenGUI
class IntroScreen(Screen, IntroScreenGUI):
""" Implementation of introduction screen """
def __init__(self, width, height, manager, owner):
Screen.__init__(self, manager, owner, Screens.INTROSCREEN)
IntroScreenGUI.__init__(self, width, height, self)
def _update_on_active(self):
pass
def on_rfid(self):
""" Handle RFID swipe: just request a switch to main screen """
if self.active:
self.screen_manager.req(Screens.MAINSCREEN)
    def on_bad_rfid(self):
        """ Do nothing on invalid RFID swipe """
        pass
def on_gui_event(self, pos):
""" Do nothing on touchscreen press """
pass
def on_key_event(self, key):
""" Do nothing on keyboard press """
pass
def on_scan(self, __product):
""" Handle barcode scan: just request a switch to main screen """
if self.active:
self.screen_manager.req(Screens.MAINSCREEN)
def on_bad_scan(self, __barcode):
""" Handle invalid barcode scan: show a banner """
if self.active:
self.set_banner_with_timeout("Unknown barcode: '%s'" % __barcode, 4, Colours.ERR, self._banner_timeout)
self._request_redraw()
def set_db_state(self, db_connected):
""" Handle change of database state: update GUI to reflect """
if not db_connected:
self.set_intro_text("ERROR: Cannot access Snackspace remote database", Colours.ERR)
else:
self.set_intro_text("Scan an item or swipe your card to start", Colours.FG)
self._request_redraw()
def _banner_timeout(self):
""" Callback from GUI indicating banner has timed out """
self.hide_banner()
self._request_redraw()
def _request_redraw(self):
""" Push a request for this screen to be drawn again """
self.screen_manager.req(self.screen_id)
authors: ["jamesfowkes@gmail.com"]
author_id: jamesfowkes@gmail.com
---
blob_id: 3dd8660eed53064c4439f1f5d6692774495f1ed8
directory_id: 4e139c024f09eb547304c2cb2d1399a334f66c92
path: /wikigen/settings.py
content_id: 615f81170857207c997a2aa14e95fc6fc85ecc86
detected_licenses: []
license_type: no_license
repo_name: epochx/PEER
snapshot_id: 04dd77fd638858fe285c9fcee3ad6a4ccd283e9a
revision_id: de52c45d121b63dee28f72b68de2625c8ec2bb66
branch_name: refs/heads/master
visit_date: 2023-02-27T15:29:09.086756
revision_date: 2021-02-01T07:54:29
committer_date: 2021-02-01T07:54:29
github_id: 264,897,876
star_events_count: 3
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 716
extension: py
content:
#!/usr/bin/env python
# -*-coding: utf8 -*-
import os
CODE_ROOT = os.path.dirname(os.path.realpath(__file__))
HOME = os.environ["HOME"]
DATA_PATH = os.path.join(HOME, "data", "PEER")
SPLITS_PATH = os.path.join(DATA_PATH, "splits")
EDITS_PATH = os.path.join(DATA_PATH, "edits")
RESULTS_PATH = os.path.join(HOME, "results", "PEER")
_DB_NAME = "runs.db"
PARAM_IGNORE_LIST = [
"results_path",
"overwrite",
"force_dataset_reload",
"verbose",
"write_mode",
]
DATABASE_CONNECTION_STRING = "sqlite:///" + os.path.join(RESULTS_PATH, _DB_NAME)
try:
DATASET_NAMES = [
name.replace(".jsonl", "") for name in os.listdir(EDITS_PATH)
]
except FileNotFoundError:
DATASET_NAMES = []
|
[
"edison_marrese@hotmail.com"
] |
edison_marrese@hotmail.com
|
---
blob_id: d6b5386e2ea175f5f51171bffa0c9efee0e4d949
directory_id: c9094a4ed256260bc026514a00f93f0b09a5d60c
path: /homeassistant/helpers/debounce.py
content_id: 23727c2a00fe774c1bdc55970457e11e4ddccf7a
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: turbokongen/home-assistant
snapshot_id: 824bc4704906ec0057f3ebd6d92788e096431f56
revision_id: 4ab0151fb1cbefb31def23ba850e197da0a5027f
branch_name: refs/heads/dev
visit_date: 2023-03-12T05:49:44.508713
revision_date: 2021-02-17T14:06:16
committer_date: 2021-02-17T14:06:16
github_id: 50,231,140
star_events_count: 4
fork_events_count: 1
gha_license_id: Apache-2.0
gha_event_created_at: 2023-02-22T06:14:30
gha_created_at: 2016-01-23T08:55:09
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,861
extension: py
content:
"""Debounce helper."""
import asyncio
from logging import Logger
from typing import Any, Awaitable, Callable, Optional
from homeassistant.core import HassJob, HomeAssistant, callback
class Debouncer:
"""Class to rate limit calls to a specific command."""
def __init__(
self,
hass: HomeAssistant,
logger: Logger,
*,
cooldown: float,
immediate: bool,
function: Optional[Callable[..., Awaitable[Any]]] = None,
):
"""Initialize debounce.
immediate: indicate if the function needs to be called right away and
wait <cooldown> until executing next invocation.
function: optional and can be instantiated later.
"""
self.hass = hass
self.logger = logger
self._function = function
self.cooldown = cooldown
self.immediate = immediate
self._timer_task: Optional[asyncio.TimerHandle] = None
self._execute_at_end_of_timer: bool = False
self._execute_lock = asyncio.Lock()
self._job: Optional[HassJob] = None if function is None else HassJob(function)
@property
def function(self) -> Optional[Callable[..., Awaitable[Any]]]:
"""Return the function being wrapped by the Debouncer."""
return self._function
@function.setter
def function(self, function: Callable[..., Awaitable[Any]]) -> None:
"""Update the function being wrapped by the Debouncer."""
self._function = function
if self._job is None or function != self._job.target:
self._job = HassJob(function)
async def async_call(self) -> None:
"""Call the function."""
assert self._job is not None
if self._timer_task:
if not self._execute_at_end_of_timer:
self._execute_at_end_of_timer = True
return
# Locked means a call is in progress. Any call is good, so abort.
if self._execute_lock.locked():
return
if not self.immediate:
self._execute_at_end_of_timer = True
self._schedule_timer()
return
async with self._execute_lock:
# Abort if timer got set while we're waiting for the lock.
if self._timer_task:
return
task = self.hass.async_run_hass_job(self._job)
if task:
await task
self._schedule_timer()
async def _handle_timer_finish(self) -> None:
"""Handle a finished timer."""
assert self._job is not None
self._timer_task = None
if not self._execute_at_end_of_timer:
return
self._execute_at_end_of_timer = False
# Locked means a call is in progress. Any call is good, so abort.
if self._execute_lock.locked():
return
async with self._execute_lock:
# Abort if timer got set while we're waiting for the lock.
if self._timer_task:
return # type: ignore
try:
task = self.hass.async_run_hass_job(self._job)
if task:
await task
except Exception: # pylint: disable=broad-except
self.logger.exception("Unexpected exception from %s", self.function)
self._schedule_timer()
@callback
def async_cancel(self) -> None:
"""Cancel any scheduled call."""
if self._timer_task:
self._timer_task.cancel()
self._timer_task = None
self._execute_at_end_of_timer = False
@callback
def _schedule_timer(self) -> None:
"""Schedule a timer."""
self._timer_task = self.hass.loop.call_later(
self.cooldown,
lambda: self.hass.async_create_task(self._handle_timer_finish()),
)
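# A minimal usage sketch (added; assumes a running Home Assistant instance
# `hass` and an async coroutine `refresh`, both hypothetical here):
#
#     debouncer = Debouncer(hass, logger, cooldown=30, immediate=True, function=refresh)
#     await debouncer.async_call()  # immediate=True: runs refresh() right away
#     await debouncer.async_call()  # within 30s: coalesced into one trailing call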
authors: ["noreply@github.com"]
author_id: turbokongen.noreply@github.com
---
blob_id: 81608d1980eaa0bcd10242f77e99c0a5aad22b73
directory_id: f9d564f1aa83eca45872dab7fbaa26dd48210d08
path: /huaweicloud-sdk-apm/huaweicloudsdkapm/v1/model/delete_app_request.py
content_id: 7b641893dbfb446555ef0b94333f7954ceaf7ae2
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: huaweicloud/huaweicloud-sdk-python-v3
snapshot_id: cde6d849ce5b1de05ac5ebfd6153f27803837d84
revision_id: f69344c1dadb79067746ddf9bfde4bddc18d5ecf
branch_name: refs/heads/master
visit_date: 2023-09-01T19:29:43.013318
revision_date: 2023-08-31T08:28:59
committer_date: 2023-08-31T08:28:59
github_id: 262,207,814
star_events_count: 103
fork_events_count: 44
gha_license_id: NOASSERTION
gha_event_created_at: 2023-06-22T14:50:48
gha_created_at: 2020-05-08T02:28:43
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,007
extension: py
content:
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DeleteAppRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'application_id': 'int',
'x_business_id': 'int'
}
attribute_map = {
'application_id': 'application_id',
'x_business_id': 'x-business-id'
}
def __init__(self, application_id=None, x_business_id=None):
"""DeleteAppRequest
The model defined in huaweicloud sdk
        :param application_id: Component ID.
        :type application_id: int
        :param x_business_id: Application ID, used for authentication.
        :type x_business_id: int
"""
self._application_id = None
self._x_business_id = None
self.discriminator = None
self.application_id = application_id
self.x_business_id = x_business_id
@property
def application_id(self):
"""Gets the application_id of this DeleteAppRequest.
        Component ID.
:return: The application_id of this DeleteAppRequest.
:rtype: int
"""
return self._application_id
@application_id.setter
def application_id(self, application_id):
"""Sets the application_id of this DeleteAppRequest.
        Component ID.
:param application_id: The application_id of this DeleteAppRequest.
:type application_id: int
"""
self._application_id = application_id
@property
def x_business_id(self):
"""Gets the x_business_id of this DeleteAppRequest.
        Application ID, used for authentication.
:return: The x_business_id of this DeleteAppRequest.
:rtype: int
"""
return self._x_business_id
@x_business_id.setter
def x_business_id(self, x_business_id):
"""Sets the x_business_id of this DeleteAppRequest.
        Application ID, used for authentication.
:param x_business_id: The x_business_id of this DeleteAppRequest.
:type x_business_id: int
"""
self._x_business_id = x_business_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteAppRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
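# A minimal usage sketch (added; the IDs below are made up):
if __name__ == "__main__":
    request = DeleteAppRequest(application_id=123, x_business_id=456)
    print(request.to_dict())  # {'application_id': 123, 'x_business_id': 456}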
authors: ["hwcloudsdk@huawei.com"]
author_id: hwcloudsdk@huawei.com
---
blob_id: cbd694d67c5985c58ecb43d81a9b857bc29e0727
directory_id: 1afa1b1929d1cd463cd9970174dd58ce2ca6eb1e
path: /configs/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes.py
content_id: a5507b4beaae675a1e5075a8fbd154ca5b5265c5
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: CAU-HE/CMCDNet
snapshot_id: 2328594bf4b883384c691099c72e119b65909121
revision_id: 31e660f81f3b625916a4c4d60cd606dcc8717f81
branch_name: refs/heads/main
visit_date: 2023-08-08T17:21:57.199728
revision_date: 2023-07-28T07:34:40
committer_date: 2023-07-28T07:34:40
github_id: 589,927,845
star_events_count: 12
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 738
extension: py
content:
_base_ = './lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py'
norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True)
model = dict(
type='EncoderDecoder',
backbone=dict(
type='MobileNetV3',
arch='small',
out_indices=(0, 1, 12),
norm_cfg=norm_cfg),
decode_head=dict(
type='LRASPPHead',
in_channels=(16, 16, 576),
in_index=(0, 1, 2),
channels=128,
input_transform='multiple_select',
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'),
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
authors: ["flyhxn@qq.com"]
author_id: flyhxn@qq.com
---
blob_id: df348e14d755b39f5502493836abc26b73a94792
directory_id: 8bbeb7b5721a9dbf40caa47a96e6961ceabb0128
path: /python3/324.Wiggle Sort II(摆动排序 II).py
content_id: d276668f063a9cc860cd18310872f77ccd38876f
detected_licenses: ["MIT"]
license_type: permissive
repo_name: lishulongVI/leetcode
snapshot_id: bb5b75642f69dfaec0c2ee3e06369c715125b1ba
revision_id: 6731e128be0fd3c0bdfe885c1a409ac54b929597
branch_name: refs/heads/master
visit_date: 2020-03-23T22:17:40.335970
revision_date: 2018-07-23T14:46:06
committer_date: 2018-07-23T14:46:06
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,414
extension: py
content:
"""
<p>Given an unsorted array <code>nums</code>, reorder it such that <code>nums[0] < nums[1] > nums[2] < nums[3]...</code>.</p>
<p><b>Example 1:</b></p>
<pre>
<strong>Input: </strong><code>nums = [1, 5, 1, 1, 6, 4]</code>
<strong>Output: </strong>One possible answer is <code>[1, 4, 1, 5, 1, 6]</code>.</pre>
<p><b>Example 2:</b></p>
<pre>
<strong>Input: </strong><code>nums = [1, 3, 2, 2, 3, 1]</code>
<strong>Output:</strong> One possible answer is <code>[2, 3, 1, 3, 1, 2]</code>.</pre>
<p><b>Note:</b><br />
You may assume all input has valid answer.</p>
<p><b>Follow Up:</b><br />
Can you do it in O(n) time and/or in-place with O(1) extra space?</p>
"""
class Solution:
    def wiggleSort(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # One standard O(n log n) approach (the follow-up asks for O(n)/O(1)):
        # sort, split in half, then interleave both halves in reverse order so
        # equal elements end up as far apart as possible.
        nums.sort()
        half = (len(nums) + 1) // 2
        nums[::2], nums[1::2] = nums[:half][::-1], nums[half:][::-1]
authors: ["lishulong@wecash.net"]
author_id: lishulong@wecash.net
---
blob_id: 74780552abdcb29cbb2ff55c2bfafc323a3c67a6
directory_id: 0e478f3d8b6c323c093455428c9094c45de13bac
path: /src/OTLMOW/PostenMapping/Model/Post060371615.py
content_id: f8c41c27b0114312043dc81cf23f0e87d9548eb3
detected_licenses: ["MIT"]
license_type: permissive
repo_name: davidvlaminck/OTLMOW
snapshot_id: c6eae90b2cab8a741271002cde454427ca8b75ba
revision_id: 48f8c357c475da1d2a1bc7820556843d4b37838d
branch_name: refs/heads/main
visit_date: 2023-01-12T05:08:40.442734
revision_date: 2023-01-10T15:26:39
committer_date: 2023-01-10T15:26:39
github_id: 432,681,113
star_events_count: 3
fork_events_count: 1
gha_license_id: MIT
gha_event_created_at: 2022-06-20T20:36:00
gha_created_at: 2021-11-28T10:28:24
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,410
extension: py
content:
# coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060371615(StandaardPost):
def __init__(self):
super().__init__(
nummer='0603.71615',
beschrijving='Gezaagde natuursteentegels, gebruiksklasse 6 volgens 6-3.8, 150 x 150, 50 mm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanNatuursteentegel',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanNatuursteentegel.afwerking',
dotnotation='afwerking',
defaultWaarde='gezaagd',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.71615')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanNatuursteentegel',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanNatuursteentegel.afmetingVanBestratingselementLxB',
dotnotation='afmetingVanBestratingselementLxB',
defaultWaarde='150-x-150',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.71615')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanNatuursteentegel',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotation='laagRol',
defaultWaarde='straatlaag',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.71615')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanNatuursteentegel',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanNatuursteentegel.gebruiksklasse',
dotnotation='gebruiksklasse',
defaultWaarde='6',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.71615')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanNatuursteentegel',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
dotnotation='dikte',
defaultWaarde='5',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.71615')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanNatuursteentegel',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotation='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.71615')])
authors: ["david.vlaminck@mow.vlaanderen.be"]
author_id: david.vlaminck@mow.vlaanderen.be
---
blob_id: 27bbcd1397596207d47f148c356105873636a5e2
directory_id: b76ae361ab277923d0fed969b795074a1ecb400b
path: /project/python_fullstack/day13/property.py
content_id: 77fec9d5cf94c99f3b42f8fbcc1616c6cbd3e6c0
detected_licenses: []
license_type: no_license
repo_name: RobotNo42/old_coed
snapshot_id: 995df921e31d5a9b65f1609380235330edb546ad
revision_id: 59f82e5d58965dd5c6340f4daf4ef43d1d311252
branch_name: refs/heads/master
visit_date: 2021-07-18T00:07:33.450173
revision_date: 2020-06-16T13:51:11
committer_date: 2020-06-16T13:51:11
github_id: 180,384,457
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 321
extension: py
content:
class People:
def __init__(self, name, age, weight, height):
self.name = name
        self.age = age
self.weight = weight
self.height = height
    @property
    def dmi(self):  # BMI-style ratio: weight / height**2
d = self.weight / (self.height ** 2)
return round(d, 2)
g = People('wzc', 20, 75, 1.7)
print(g.dmi)
authors: ["chengge1124@gmail.com"]
author_id: chengge1124@gmail.com
---
blob_id: 22acc2dddc23d9c0fc2317a07bff8b04cb194b7e
directory_id: 580f9928174741d07720141107879878091a2640
path: /dlib-example/face-detector/cnn_face_detector.py
content_id: 01f7d8ae4592d4d0937b596b43b56ea97a2940fc
detected_licenses: []
license_type: no_license
repo_name: zengzhiying/machine_learning
snapshot_id: 08aa323e4bcf42f208ce81297249ceed9818844c
revision_id: 7b6a3c19580f000b1fa4ae54305749c3857b91a6
branch_name: refs/heads/master
visit_date: 2023-02-27T20:08:43.384000
revision_date: 2021-02-07T14:20:48
committer_date: 2021-02-07T14:20:48
github_id: 99,288,444
star_events_count: 3
fork_events_count: 1
gha_license_id: null
gha_event_created_at: 2020-10-12T22:02:52
gha_created_at: 2017-08-04T01:07:32
gha_language: MATLAB
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,819
extension: py
content:
#!/usr/bin/python3
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example shows how to run a CNN based face detector using dlib. The
# example loads a pretrained model and uses it to find faces in images. The
# CNN model is much more accurate than the HOG based model shown in the
# face_detector.py example, but takes much more computational power to
# run, and is meant to be executed on a GPU to attain reasonable speed.
#
# You can download the pre-trained model from:
# http://dlib.net/files/mmod_human_face_detector.dat.bz2
#
# The examples/faces folder contains some jpg images of people. You can run
# this program on them and see the detections by executing the
# following command:
# ./cnn_face_detector.py mmod_human_face_detector.dat ../examples/faces/*.jpg
#
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
# You can install dlib using the command:
# pip install dlib
#
# Alternatively, if you want to compile dlib yourself then go into the dlib
# root folder and run:
# python setup.py install
#
# Compiling dlib should work on any operating system so long as you have
# CMake installed. On Ubuntu, this can be done easily by running the
# command:
# sudo apt-get install cmake
#
# Also note that this example requires Numpy which can be installed
# via the command:
# pip install numpy
import os
import sys
import time
import cv2
import dlib
# if len(sys.argv) < 3:
# print(
# "Call this program like this:\n"
# " ./cnn_face_detector.py mmod_human_face_detector.dat ../examples/faces/*.jpg\n"
# "You can get the mmod_human_face_detector.dat file from:\n"
# " http://dlib.net/files/mmod_human_face_detector.dat.bz2")
# exit()
cnn_face_detector = dlib.cnn_face_detection_model_v1('mmod_human_face_detector.dat')
# win = dlib.image_window()
image_file = sys.argv[1]
if os.path.isfile(image_file):
print("Processing file: {}".format(image_file))
t1 = time.time()
img = dlib.load_rgb_image(image_file)
print("load image time: {:.3f}s".format(time.time() - t1))
t1 = time.time()
# The 1 in the second argument indicates that we should upsample the image
# 1 time. This will make everything bigger and allow us to detect more
# faces.
dets = cnn_face_detector(img, 1)
print("detect image time: {:.3f}s".format(time.time() - t1))
t1 = time.time()
'''
This detector returns a mmod_rectangles object. This object contains a list of mmod_rectangle objects.
These objects can be accessed by simply iterating over the mmod_rectangles object
The mmod_rectangle object has two member variables, a dlib.rectangle object, and a confidence score.
It is also possible to pass a list of images to the detector.
- like this: dets = cnn_face_detector([image list], upsample_num, batch_size = 128)
In this case it will return a mmod_rectangless object.
This object behaves just like a list of lists and can be iterated over.
'''
print("Number of faces detected: {}".format(len(dets)))
img = cv2.imread(image_file)
for i, d in enumerate(dets):
print("Detection {}: Left: {} Top: {} Right: {} Bottom: {} Confidence: {}".format(
i, d.rect.left(), d.rect.top(), d.rect.right(), d.rect.bottom(), d.confidence))
cv2.rectangle(img, (d.rect.left(), d.rect.top()), (d.rect.right(), d.rect.bottom()), (0, 0, 255), 3)
cv2.imwrite(f'{image_file[:-4]}_box.jpg', img)
print("rect image time: {:.3f}s".format(time.time() - t1))
# rects = dlib.rectangles()
# rects.extend([d.rect for d in dets])
# win.clear_overlay()
# win.set_image(img)
# win.add_overlay(rects)
# dlib.hit_enter_to_continue()
authors: ["yingzhi_zeng@126.com"]
author_id: yingzhi_zeng@126.com
---
blob_id: 5523a954cd910bce70946a3ab248b803adcc2d11
directory_id: 6413fe58b04ac2a7efe1e56050ad42d0e688adc6
path: /tempenv/lib/python3.7/site-packages/dash_html_components/Tbody.py
content_id: 6c69bce7274c8ffc23ad59c3a25b5f69c48d2621
detected_licenses: ["MIT"]
license_type: permissive
repo_name: tytechortz/Denver_temperature
snapshot_id: 7f91e0ac649f9584147d59193568f6ec7efe3a77
revision_id: 9d9ea31cd7ec003e8431dcbb10a3320be272996d
branch_name: refs/heads/master
visit_date: 2022-12-09T06:22:14.963463
revision_date: 2019-10-09T16:30:52
committer_date: 2019-10-09T16:30:52
github_id: 170,581,559
star_events_count: 1
fork_events_count: 0
gha_license_id: MIT
gha_event_created_at: 2022-06-21T23:04:21
gha_created_at: 2019-02-13T21:22:53
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,310
extension: py
content:
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Tbody(Component):
"""A Tbody component.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional): The children of this component
- id (string; optional): The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.
- n_clicks (number; optional): An integer that represents the number of times
that this element has been clicked on.
- n_clicks_timestamp (number; optional): An integer that represents the time (in ms since 1970)
at which n_clicks changed. This can be used to tell
which button was changed most recently.
- key (string; optional): A unique identifier for the component, used to improve
performance by React.js while rendering components
See https://reactjs.org/docs/lists-and-keys.html for more info
- role (string; optional): The ARIA role attribute
- data-* (string; optional): A wildcard data attribute
- aria-* (string; optional): A wildcard aria attribute
- accessKey (string; optional): Defines a keyboard shortcut to activate or add focus to the element.
- className (string; optional): Often used with CSS to style elements with common properties.
- contentEditable (string; optional): Indicates whether the element's content is editable.
- contextMenu (string; optional): Defines the ID of a <menu> element which will serve as the element's context menu.
- dir (string; optional): Defines the text direction. Allowed values are ltr (Left-To-Right) or rtl (Right-To-Left)
- draggable (string; optional): Defines whether the element can be dragged.
- hidden (string; optional): Prevents rendering of given element, while keeping child elements, e.g. script elements, active.
- lang (string; optional): Defines the language used in the element.
- spellCheck (string; optional): Indicates whether spell checking is allowed for the element.
- style (dict; optional): Defines CSS styles which will override styles previously set.
- tabIndex (string; optional): Overrides the browser's default tab order and follows the one specified instead.
- title (string; optional): Text to be displayed in a tooltip when hovering over the element.
Available events: 'click'"""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, n_clicks=Component.UNDEFINED, n_clicks_timestamp=Component.UNDEFINED, key=Component.UNDEFINED, role=Component.UNDEFINED, accessKey=Component.UNDEFINED, className=Component.UNDEFINED, contentEditable=Component.UNDEFINED, contextMenu=Component.UNDEFINED, dir=Component.UNDEFINED, draggable=Component.UNDEFINED, hidden=Component.UNDEFINED, lang=Component.UNDEFINED, spellCheck=Component.UNDEFINED, style=Component.UNDEFINED, tabIndex=Component.UNDEFINED, title=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'data-*', 'aria-*', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title']
self._type = 'Tbody'
self._namespace = 'dash_html_components'
self._valid_wildcard_attributes = ['data-', 'aria-']
self.available_events = ['click']
self.available_properties = ['children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'data-*', 'aria-*', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title']
self.available_wildcard_properties = ['data-', 'aria-']
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Tbody, self).__init__(children=children, **args)
def __repr__(self):
if(any(getattr(self, c, None) is not None
for c in self._prop_names
if c is not self._prop_names[0])
or any(getattr(self, c, None) is not None
for c in self.__dict__.keys()
if any(c.startswith(wc_attr)
for wc_attr in self._valid_wildcard_attributes))):
props_string = ', '.join([c+'='+repr(getattr(self, c, None))
for c in self._prop_names
if getattr(self, c, None) is not None])
wilds_string = ', '.join([c+'='+repr(getattr(self, c, None))
for c in self.__dict__.keys()
if any([c.startswith(wc_attr)
for wc_attr in
self._valid_wildcard_attributes])])
return ('Tbody(' + props_string +
(', ' + wilds_string if wilds_string != '' else '') + ')')
else:
return (
'Tbody(' +
repr(getattr(self, self._prop_names[0], None)) + ')')
authors: ["jmswank7@gmail.com"]
author_id: jmswank7@gmail.com
---
blob_id: ad965770425ffb7514c7eae370f4dda72d70789c
directory_id: a29b8d6ae6642ef80d04ae99d721b703de06db69
path: /maro/rl/rl_component/rl_component_bundle.py
content_id: cd18accdf0d03c47cf335a0de68f3444b0a88656
detected_licenses: ["LicenseRef-scancode-generic-cla", "MIT"]
license_type: permissive
repo_name: microsoft/maro
snapshot_id: 6aab1a4e86fddabf7f242f0d1020d985a5f7a5f3
revision_id: b3c6a589ad9036b03221e776a6929b2bc1eb4680
branch_name: refs/heads/master
visit_date: 2023-08-24T16:52:38.250279
revision_date: 2023-05-15T04:31:58
committer_date: 2023-05-15T04:31:58
github_id: 230,389,247
star_events_count: 764
fork_events_count: 158
gha_license_id: MIT
gha_event_created_at: 2023-07-25T20:59:06
gha_created_at: 2019-12-27T06:48:27
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,598
extension: py
content:
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import warnings
from typing import Any, Dict, List
from maro.rl.policy import AbsPolicy, RLPolicy
from maro.rl.rollout import AbsEnvSampler
from maro.rl.training import AbsTrainer
from maro.rl.workflows.callback import Callback
class RLComponentBundle:
"""Bundle of all necessary components to run a RL job in MARO.
env_sampler (AbsEnvSampler): Environment sampler of the scenario.
agent2policy (Dict[Any, str]): Agent name to policy name mapping of the RL job. For example:
{agent1: policy1, agent2: policy1, agent3: policy2}.
policies (List[AbsPolicy]): Policies.
trainers (List[AbsTrainer]): Trainers.
device_mapping (Dict[str, str], default=None): Device mapping that identifying which device to put each policy.
If None, there will be no explicit device assignment.
policy_trainer_mapping (Dict[str, str], default=None): Policy-trainer mapping which identifying which trainer to
train each policy. If None, then a policy's trainer's name is the first segment of the policy's name,
separated by dot. For example, "ppo_1.policy" is trained by "ppo_1". Only policies that provided in
policy-trainer mapping are considered as trainable polices. Policies that not provided in policy-trainer
mapping will not be trained.
"""
def __init__(
self,
env_sampler: AbsEnvSampler,
agent2policy: Dict[Any, str],
policies: List[AbsPolicy],
trainers: List[AbsTrainer],
device_mapping: Dict[str, str] = None,
policy_trainer_mapping: Dict[str, str] = None,
customized_callbacks: List[Callback] = [],
) -> None:
self.env_sampler = env_sampler
self.agent2policy = agent2policy
self.policies = policies
self.trainers = trainers
self.customized_callbacks = customized_callbacks
policy_set = set([policy.name for policy in self.policies])
not_found = [policy_name for policy_name in self.agent2policy.values() if policy_name not in policy_set]
if len(not_found) > 0:
raise ValueError(f"The following policies are required but cannot be found: [{', '.join(not_found)}]")
# Remove unused policies
kept_policies = []
        for policy in self.policies:
            if policy.name not in self.agent2policy.values():
                # Warn (not raise) so the removal actually proceeds.
                warnings.warn(f"Policy {policy.name} is removed since it is not used by any agent.")
            else:
                kept_policies.append(policy)
self.policies = kept_policies
policy_set = set([policy.name for policy in self.policies])
self.device_mapping = (
{k: v for k, v in device_mapping.items() if k in policy_set} if device_mapping is not None else {}
)
self.policy_trainer_mapping = (
policy_trainer_mapping
if policy_trainer_mapping is not None
else {policy_name: policy_name.split(".")[0] for policy_name in policy_set}
)
# Check missing trainers
self.policy_trainer_mapping = {
policy_name: trainer_name
for policy_name, trainer_name in self.policy_trainer_mapping.items()
if policy_name in policy_set
}
trainer_set = set([trainer.name for trainer in self.trainers])
not_found = [
trainer_name for trainer_name in self.policy_trainer_mapping.values() if trainer_name not in trainer_set
]
if len(not_found) > 0:
raise ValueError(f"The following trainers are required but cannot be found: [{', '.join(not_found)}]")
# Remove unused trainers
kept_trainers = []
        for trainer in self.trainers:
            if trainer.name not in self.policy_trainer_mapping.values():
                warnings.warn(f"Trainer {trainer.name} is removed since no policy is trained by it.")
            else:
                kept_trainers.append(trainer)
self.trainers = kept_trainers
@property
def trainable_agent2policy(self) -> Dict[Any, str]:
return {
agent_name: policy_name
for agent_name, policy_name in self.agent2policy.items()
if policy_name in self.policy_trainer_mapping
}
@property
def trainable_policies(self) -> List[RLPolicy]:
policies = []
for policy in self.policies:
if policy.name in self.policy_trainer_mapping:
assert isinstance(policy, RLPolicy)
policies.append(policy)
return policies
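# Standalone illustration of the default policy->trainer naming convention
# described in the docstring above (added sketch; policy names are hypothetical):
if __name__ == "__main__":
    policy_names = ["ppo_1.policy_a", "ppo_1.policy_b", "dqn.policy"]
    print({name: name.split(".")[0] for name in policy_names})
    # -> {'ppo_1.policy_a': 'ppo_1', 'ppo_1.policy_b': 'ppo_1', 'dqn.policy': 'dqn'}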
authors: ["noreply@github.com"]
author_id: microsoft.noreply@github.com
---
blob_id: cc0c55576ca8c77495cb64ff56021fd3e2852733
directory_id: e3d9592ff05f225433e1689ec70253043a360ee2
path: /hackerrank/python/regex_and_parsing/roman_numerals.py
content_id: a754d0a19b5448bb11bc357181bcb56b1a1b9310
detected_licenses: []
license_type: no_license
repo_name: jreiher2003/code_challenges
snapshot_id: cad28cac57b6e14ffd30d2b7fe00abdba8b3fa47
revision_id: ac03c868b28e1cfa22d8257366e7a0f8f757ad8c
branch_name: refs/heads/master
visit_date: 2020-04-16T02:25:26.267418
revision_date: 2016-12-12T01:23:56
committer_date: 2016-12-12T01:23:56
github_id: 58,969,218
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 150
extension: py
content:
import re
pattern = "^M{0,3}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$"
if re.search(pattern, "MMMM"):
print "True"
else:
print "False"
authors: ["jreiher2003@yahoo.com"]
author_id: jreiher2003@yahoo.com
---
blob_id: 5ae87f822d3e84a9ad09251271d260615c2fb3e0
directory_id: 913674143b4cbc8df4b93975169758c17e4d7972
path: /MyScrapyTencent/MyScrapyTencent/pipelines.py
content_id: 58a79ba46c011cefb115bb334ef40f9692f9d9c6
detected_licenses: []
license_type: no_license
repo_name: HeywoodKing/Scrapy-Examples
snapshot_id: e8dca55767537609d5a6e5ba426c58dbb4669610
revision_id: e6e5415cc42c234c25ce43ad0c4227364798af3c
branch_name: refs/heads/master
visit_date: 2020-05-16T12:51:25.206950
revision_date: 2019-04-26T12:25:34
committer_date: 2019-04-26T12:25:34
github_id: 183,056,875
star_events_count: 2
fork_events_count: 2
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 612
extension: py
content:
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
class MyscrapytencentPipeline(object):
"""
    Purpose: save item data to a JSON file
"""
def __init__(self):
self.filename = open("tencent.json", "wb")
def process_item(self, item, spider):
text = json.dumps(dict(item), ensure_ascii=False) + ",\n"
self.filename.write(text.encode("utf-8"))
return item
def close_spider(self, spider):
self.filename.close()
authors: ["opencoding@hotmail.com"]
author_id: opencoding@hotmail.com
---
blob_id: 6da511645e7a197feda5f07313d5b595c8da81ab
directory_id: bb726031eb8ab8e690786a766b679f0666695c10
path: /urlhunter/blueprints/main.py
content_id: d01b118d39775b17480b5a7e51a603c41b8cc29b
detected_licenses: []
license_type: no_license
repo_name: nsdown/urlhunter
snapshot_id: 01fd9348fc36b2e7f6cebb3488ea5fb021e54fef
revision_id: 1181d89d2f8c731e9693ec14a2fc4beee5b85d71
branch_name: refs/heads/master
visit_date: 2020-04-13T01:18:56.216614
revision_date: 2018-12-23T04:17:52
committer_date: 2018-12-23T04:17:52
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6,085
extension: py
content:
import requests
from more_itertools import flatten
from flask import Blueprint, render_template, flash, redirect, url_for, request, current_app, Markup
from flask_login import login_required, current_user
from urlhunter.forms import URLForm, RegexForm
from urlhunter.utils import links
from urlhunter.models import Url, Regex
from urlhunter.extensions import db
bp = Blueprint('main', __name__)
@bp.route('/')
def index():
return render_template('main/index.html')
@bp.route('/extract', methods=['GET', 'POST'])
@login_required
def extract():
form = URLForm()
extracted_urls = None
if form.validate_on_submit():
try:
if Url.query.count() > current_app.config['URL_LIMIT']:
raise Exception(f"URL超限了!请先清空![{Url.query.count()}/{current_app.config['URL_LIMIT']}]")
urls = form.urls.data.split('\n')
headers = {
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
}
responses = [
requests.get(url.strip(), headers=headers) for url in urls
]
if form.search.data:
if form.use_regex.data is True:
extracted_urls = [
links(resp, pattern=form.search.data)
for resp in responses
]
else:
extracted_urls = [
links(resp, search=form.search.data)
for resp in responses
]
else:
extracted_urls = [links(resp) for resp in responses]
extracted_urls = list(flatten(extracted_urls))
if len(extracted_urls) == 0:
                raise Exception('Huh? Nothing was captured U_U')
except Exception as e:
flash(e, 'danger')
else:
flash(
Markup(
f"成功捕获到{len(extracted_urls)}条url XD <a href={url_for('main.show_urls')}>立即查看</a>"
), 'success')
for extracted_url in extracted_urls:
if not Url.query.filter_by(body=extracted_url).first():
url = Url(
body=extracted_url,
owner=current_user._get_current_object())
db.session.add(url)
db.session.commit()
return redirect(url_for('main.extract'))
form.urls.data = request.args.get('urls')
form.search.data = request.args.get('search')
form.use_regex.data = bool(request.args.get('use_regex'))
return render_template(
'main/extract.html', form=form, extracted_urls=extracted_urls)
@bp.route('/urls')
@login_required
def show_urls():
page = request.args.get('page', 1, type=int)
per_page = current_app.config['URL_PER_PAGE']
current_user_urls = Url.query.with_parent(current_user)
pagination = current_user_urls.order_by(
Url.timestamp.desc()).paginate(page, per_page)
urls = pagination.items
outputs = '\n'.join([url.body for url in current_user_urls.all()])
exclude_strings = request.args.get('exclude')
if exclude_strings:
urls, outputs = exclude_urls(exclude_strings, urls, current_user_urls)
return render_template('main/urls.html', urls=urls, pagination=pagination, outputs=outputs)
@bp.route('/urls/delete/all', methods=['POST'])
@login_required
def delete_urls():
    # Delete only the current user's URLs; the previous raw
    # 'delete from url' statement wiped the whole table.
    urls = Url.query.with_parent(current_user._get_current_object()).all()
    for url in urls:
        db.session.delete(url)
    db.session.commit()
    flash('Operation successful!', 'success')
return redirect(url_for('main.show_urls'))
@bp.route('/urls/search')
@login_required
def search_urls():
q = request.args.get('q', '')
if q == '':
        flash('Please enter a URL to search for', 'warning')
return redirect(url_for('main.show_urls'))
page = request.args.get('page', 1, type=int)
per_page = current_app.config['SEARCH_PER_PAGE']
searched_urls = Url.query.with_parent(current_user).filter(Url.body.ilike(f'%{q}%'))
pagination = searched_urls.paginate(page, per_page)
results = pagination.items
outputs = '\n'.join([url.body for url in searched_urls.all()])
exclude_strings = request.args.get('exclude')
if exclude_strings:
results, outputs = exclude_urls(exclude_strings, results, searched_urls)
return render_template('main/search.html', pagination=pagination, results=results, q=q, outputs=outputs)
@bp.route('/regexs')
@login_required
def show_regexs():
page = request.args.get('page', 1, type=int)
per_page = current_app.config['REGEX_PER_PAGE']
pagination = Regex.query.with_parent(current_user).paginate(page, per_page)
regexs = pagination.items
return render_template('main/regexs.html', regexs=regexs, pagination=pagination)
@bp.route('/regexs/upload', methods=['GET', 'POST'])
@login_required
def upload_regex():
form = RegexForm()
if form.validate_on_submit():
name = form.name.data
site = form.site.data
body = form.body.data
regex = Regex(name=name, site=site, body=body, author=current_user._get_current_object())
db.session.add(regex)
db.session.commit()
        flash('Added successfully!', 'success')
return redirect(url_for('main.show_regexs'))
return render_template('main/upload.html', form=form)
@bp.route('/help')
def show_help():
return render_template('main/help.html')
@bp.app_template_global()
def exclude_urls(exclude_strings, paginated_urls, all_urls):
exclude_strings = exclude_strings.split(',')
excluded_paginated_urls = [url for url in paginated_urls if not any(es in url.body for es in exclude_strings)]
excluded_all_urls = [url for url in all_urls.all() if not any(es in url.body for es in exclude_strings)]
excluded_outputs = '\n'.join([url.body for url in excluded_all_urls])
return excluded_paginated_urls, excluded_outputs
authors: ["2582347430@qq.com"]
author_id: 2582347430@qq.com
---
blob_id: b93b4f5be48c893c85210950d92f8401699b04e8
directory_id: 3e7acb0962da48f29875d581c424ebbd3a98437c
path: /test/test_timeslotpage.py
content_id: cff39a4c93196a3844ea72a47ba486249dc72d75
detected_licenses: []
license_type: no_license
repo_name: crazypoo/geniusbar-reserver
snapshot_id: ae02373bfaf24c5fdcd230c747c162ef259c2396
revision_id: b5d1981b59d998d8f110c87e8c5aa7ebe067f96c
branch_name: refs/heads/master
visit_date: 2021-01-21T08:14:52.607615
revision_date: 2014-12-13T05:41:14
committer_date: 2014-12-13T05:41:14
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,289
extension: py
content:
import os
import sys
if '../' not in sys.path:
    sys.path.append('../')
from sites.apple_main import AppleGeniusBarReservation
from sites.apple_genius_bar.store_page import GeniusbarPage
# Python 2 idiom: reload() restores the setdefaultencoding() hook.
reload(sys)
sys.setdefaultencoding('utf-8')
cwd = os.path.abspath(os.getcwd())
# sites
sys.path.append(os.path.join(cwd, '../gui'))
sys.path.append(os.path.join(cwd, '../proxy'))
sys.path.append(os.path.join(cwd, '../sites'))
sys.path.append(os.path.join(cwd, '../utils'))
def test_timeslotpage():
appleGeniusBarReservation = AppleGeniusBarReservation({})
page = GeniusbarPage('')
f = open('timeslots.htm', 'r')
data = f.read()
f.close()
data = data.encode('utf-8', 'ignore')
ret, maxrow = appleGeniusBarReservation.buildTimeSlotsTable(page, data)
return ret, maxrow
#test_timeslotpage()
from PyQt4 import QtGui
from gui.uidesigner.taskviewwidget import TaskViewWidget
def doubleclicked(timestr):
print(timestr)
def main(proxyServers=None):
data, maxrow = test_timeslotpage()
app = QtGui.QApplication(sys.argv)
    widget = TaskViewWidget()
    widget.fillTableWidget(data, maxrow)
    widget.sigTimeSlot.connect(doubleclicked)
    widget.show()
app.exec_()
if __name__ == "__main__":
main()
authors: ["flowinair@gmail.com"]
author_id: flowinair@gmail.com
---
blob_id: dda50d0d9bf5a626f3b2921adbcb876a38c06129
directory_id: 2bdedcda705f6dcf45a1e9a090377f892bcb58bb
path: /src/main/output/temp_man/lombok_database_family/idea_family/game_head.py
content_id: 8f2901d0914575e6572ca848757d9392e89f1bf6
detected_licenses: []
license_type: no_license
repo_name: matkosoric/GenericNameTesting
snapshot_id: 860a22af1098dda9ea9e24a1fc681bb728aa2d69
revision_id: 03f4a38229c28bc6d83258e5a84fce4b189d5f00
branch_name: refs/heads/master
visit_date: 2021-01-08T22:35:20.022350
revision_date: 2020-02-21T11:28:21
committer_date: 2020-02-21T11:28:21
github_id: 242,123,053
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,092
extension: py
content:
using System;
using System.Net;
using System.Net.Http;
using System.Threading.Tasks;
using Microsoft.Translator.API;
namespace CSharp_TranslateSample
{
public class Program
{
private const string SubscriptionKey = "774041bc63961f2d2f59c34b29325acd"; //Enter here the Key from your Microsoft Translator Text subscription on http://portal.azure.com
public static string traducida;
public static void Main(string[] args)
{
//TranslateAsync().Wait();
//Console.ReadKey();
}
public static void iniciar() {
TranslateAsync().Wait();
Console.ReadKey();
}
/// Demonstrates getting an access token and using the token to translate.
private static async Task TranslateAsync()
{
var translatorService = new TranslatorService.LanguageServiceClient();
var authTokenSource = new AzureAuthToken(SubscriptionKey);
var token = string.Empty;
try
{
token = await authTokenSource.GetAccessTokenAsync();
}
catch (HttpRequestException)
{
switch (authTokenSource.RequestStatusCode)
{
case HttpStatusCode.Unauthorized:
Console.WriteLine("Request to token service is not authorized (401). Check that the Azure subscription key is valid.");
break;
case HttpStatusCode.Forbidden:
Console.WriteLine("Request to token service is not authorized (403). For accounts in the free-tier, check that the account quota is not exceeded.");
break;
}
throw;
}
traducida = translatorService.Translate(token, "Hello World", "en", "fr", "text/plain", "general", string.Empty);
//Console.WriteLine("Translated to French: {0}", translatorService.Translate(token, "Hello World", "en", "fr", "text/plain", "general", string.Empty));
}
}
}
authors: ["soric.matko@gmail.com"]
author_id: soric.matko@gmail.com
---
blob_id: 656c9f58562a43a8ffa99171fc48c4ef5fad535a
directory_id: de24f83a5e3768a2638ebcf13cbe717e75740168
path: /moodledata/vpl_data/177/usersdata/271/100509/submittedfiles/pico.py
content_id: 392e7558a7b348a1605eca85241f40ce1d490a13
detected_licenses: []
license_type: no_license
repo_name: rafaelperazzo/programacao-web
snapshot_id: 95643423a35c44613b0f64bed05bd34780fe2436
revision_id: 170dd5440afb9ee68a973f3de13a99aa4c735d79
branch_name: refs/heads/master
visit_date: 2021-01-12T14:06:25.773146
revision_date: 2017-12-22T16:05:45
committer_date: 2017-12-22T16:05:45
github_id: 69,566,344
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,668
extension: py
content:
# -*- coding: utf-8 -*-
# FUNCTIONS
def crescente(lista) :
if len(lista)==1 :
return(False)
cont = 0
for i in range(0,len(lista),1) :
if (i==0) :
if(lista[i]<lista[i+1]) :
cont = cont+1
elif (i==len(lista)-1) :
            if lista[len(lista)-2] < lista[len(lista)-1] :
cont = cont+1
else :
if lista[i]<lista[i+1] :
cont = cont+1
if cont == len(lista) :
return(True)
else :
return(False)
def decrescente(lista) :
if len(lista)==1 :
return(False)
cont = 0
for i in range(0,len(lista),1) :
if (i==0) :
if(lista[i]>lista[i+1]) :
cont = cont+1
elif (i==len(lista)-1) :
            if lista[len(lista)-2] > lista[len(lista)-1] :
cont = cont+1
else :
if lista[i]>lista[i+1] :
cont = cont+1
if cont == len(lista) :
return(True)
else :
return(False)
def pico(lista):
maior = max(lista)
imaior = lista.index(maior)
an = []
de = []
for i in range(0,imaior+1,1):
elemento_an = lista[i]
an.append (elemento_an)
    for i in range (imaior+1,len(lista),1) : # start after the peak index, not the peak value
elemento_de = lista[i]
de.append(elemento_de)
if crescente (an) and decrescente(de) :
return(True)
else :
return(False)
# INPUT
n = int(input('Enter the number of elements in the list: '))
a = []
for i in range(0,n,1) :
    valor_a = float(input('Enter an element of list a: '))
a.append(valor_a)
if pico(a) :
print('S')
else :
print('N')
authors: ["rafael.mota@ufca.edu.br"]
author_id: rafael.mota@ufca.edu.br
---
blob_id: 567deda66b4abddd9fbccea623c2fb3c285bc502
directory_id: 21dd7d56c370ea9a02b66654525fd96a398a9e49
path: /apps/userprofile/migrations/0012_auto_20150706_1530.py
content_id: f76414aec2906f0309739ccb3346f2edf38718b3
detected_licenses: []
license_type: no_license
repo_name: hqpr/fame
snapshot_id: fdad5d03bf9ee7ca31ae8a4701ff05bafd49540f
revision_id: 8b77e3a822ae70ee6d79a8003e1d9f9bc5ba8355
branch_name: refs/heads/master
visit_date: 2023-01-14T16:58:46.533090
revision_date: 2015-08-31T15:37:09
committer_date: 2015-08-31T15:37:09
github_id: 35,205,330
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 477
extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0011_auto_20150705_1058'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='modified',
field=models.DateTimeField(default=datetime.datetime(2015, 7, 6, 15, 30, 35, 789165)),
),
]
authors: ["adubnyak@gmail.com"]
author_id: adubnyak@gmail.com
---
blob_id: a3720504b0405edeb5fa1836f9f6c46354683a49
directory_id: d094ba0c8a9b1217fbf014aa79a283a49aabe88c
path: /env/lib/python3.6/site-packages/kombu/utils/limits.py
content_id: 833cb96a47fce4401f66851a80cdda399eb46c6a
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: Raniac/NEURO-LEARN
snapshot_id: d9274e0baadd97bb02da54bdfcf6ca091fc1c703
revision_id: 3c3acc55de8ba741e673063378e6cbaf10b64c7a
branch_name: refs/heads/master
visit_date: 2022-12-25T23:46:54.922237
revision_date: 2020-09-06T03:15:14
committer_date: 2020-09-06T03:15:14
github_id: 182,013,100
star_events_count: 9
fork_events_count: 2
gha_license_id: Apache-2.0
gha_event_created_at: 2022-12-09T21:01:00
gha_created_at: 2019-04-18T03:57:00
gha_language: CSS
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,195
extension: py
content:
"""
kombu.utils.limits
==================
Token bucket implementation for rate limiting.
"""
from __future__ import absolute_import
from kombu.five import monotonic
__all__ = ['TokenBucket']
class TokenBucket(object):
"""Token Bucket Algorithm.
See http://en.wikipedia.org/wiki/Token_Bucket
Most of this code was stolen from an entry in the ASPN Python Cookbook:
http://code.activestate.com/recipes/511490/
.. admonition:: Thread safety
This implementation is not thread safe. Access to a `TokenBucket`
instance should occur within the critical section of any multithreaded
code.
"""
#: The rate in tokens/second that the bucket will be refilled.
fill_rate = None
#: Maximum number of tokens in the bucket.
capacity = 1
#: Timestamp of the last time a token was taken out of the bucket.
timestamp = None
def __init__(self, fill_rate, capacity=1):
self.capacity = float(capacity)
self._tokens = capacity
self.fill_rate = float(fill_rate)
self.timestamp = monotonic()
def can_consume(self, tokens=1):
"""Return :const:`True` if the number of tokens can be consumed
from the bucket. If they can be consumed, a call will also consume the
requested number of tokens from the bucket. Calls will only consume
`tokens` (the number requested) or zero tokens -- it will never consume
a partial number of tokens."""
if tokens <= self._get_tokens():
self._tokens -= tokens
return True
return False
def expected_time(self, tokens=1):
"""Return the time (in seconds) when a new token is expected
to be available. This will not consume any tokens from the bucket."""
_tokens = self._get_tokens()
tokens = max(tokens, _tokens)
return (tokens - _tokens) / self.fill_rate
def _get_tokens(self):
if self._tokens < self.capacity:
now = monotonic()
delta = self.fill_rate * (now - self.timestamp)
self._tokens = min(self.capacity, self._tokens + delta)
self.timestamp = now
return self._tokens
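# A minimal usage sketch (added, not part of kombu): allow bursts of up to
# 5 messages, refilled at 10 tokens/second.
if __name__ == '__main__':
    from time import sleep
    bucket = TokenBucket(fill_rate=10, capacity=5)
    for i in range(8):
        while not bucket.can_consume(1):
            sleep(bucket.expected_time(1))  # wait until a token is available
        print('message', i)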
authors: ["leibingye@outlook.com"]
author_id: leibingye@outlook.com
---
blob_id: 395d5d325531f6a44e47ababe037cd9701a736b0
directory_id: bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
path: /lib/surface/artifacts/files/__init__.py
content_id: efc655f736b989835c4478beee69c049eda522b7
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: google-cloud-sdk-unofficial/google-cloud-sdk
snapshot_id: 05fbb473d629195f25887fc5bfaa712f2cbc0a24
revision_id: 392abf004b16203030e6efd2f0af24db7c8d669e
branch_name: refs/heads/master
visit_date: 2023-08-31T05:40:41.317697
revision_date: 2023-08-23T18:23:16
committer_date: 2023-08-23T18:23:16
github_id: 335,182,594
star_events_count: 9
fork_events_count: 2
gha_license_id: NOASSERTION
gha_event_created_at: 2022-10-29T20:49:13
gha_created_at: 2021-02-02T05:47:30
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,222
extension: py
content:
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for Artifact Registry files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Files(base.Group):
"""Manage Artifact Registry files.
## EXAMPLES
  To list all files in the current project when the `artifacts/repository`
  and `artifacts/location` properties are set, run:
$ {command} list
To list files under repository my-repo in the current project and location,
run:
$ {command} list --repository=my-repo
"""
category = base.CI_CD_CATEGORY
authors: ["cloudsdk.mirror@gmail.com"]
author_id: cloudsdk.mirror@gmail.com
---
blob_id: d28b2ba189d4cd74c177b25d287aeae3ef6cade1
directory_id: 74091dce735f281188d38d2f00d1a68e1d38ff7a
path: /pytest_udemy_params_datadriven_crossbrowser/tests/conftest.py
content_id: 8e184dcf876ae8b91e9a90573978768b1f73f6a5
detected_licenses: []
license_type: no_license
repo_name: nbiadrytski-zz/python-training
snapshot_id: 96741aa0ef37bda32d049fde5938191025fe2924
revision_id: 559a64aae2db51e11812cea5ff602f25953e8070
branch_name: refs/heads/master
visit_date: 2023-05-07T04:08:23.898161
revision_date: 2019-12-10T12:12:59
committer_date: 2019-12-10T12:12:59
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 506
extension: py
content:
from pytest import fixture
from selenium import webdriver
import json
data_path = 'test_data.json'
def load_test_data(path):
with open(path) as data_file:
data = json.load(data_file)
return data
@fixture(params=load_test_data(data_path))
def tv_brand(request):
data = request.param
return data
@fixture(params=[webdriver.Chrome, webdriver.Firefox], ids=['Chrome', 'FF'])
def browser(request):
driver = request.param
drvr = driver()
yield drvr
drvr.quit()
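# A hypothetical test using the fixtures above: pytest runs it once per
# (tv_brand, browser) combination. The page and assertion are illustrative
# only; tv_brand's actual shape depends on test_data.json.
def test_fixtures_combine(browser, tv_brand):
    browser.get('https://example.com')
    assert 'Example Domain' in browser.title, tv_brand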
authors: ["Mikalai_Biadrytski@epam.com"]
author_id: Mikalai_Biadrytski@epam.com
---
blob_id: e42d622c1426a68b75b5c9503972b27ca2794746
directory_id: f04089bf7df2b7cd4058f8df3ffe747c349afd60
path: /multiply_strings.py
content_id: 77836126e3b8f258f7bf2527d3d6e09eaf5f1475
detected_licenses: []
license_type: no_license
repo_name: dbialon/LeetCode
snapshot_id: d2a674f7c5ba51a3714ca46026d2a5d37681919d
revision_id: d28c63779212519fe38e98b9f0e0cbf48137bc43
branch_name: refs/heads/master
visit_date: 2023-01-12T06:27:32.516504
revision_date: 2020-11-18T11:35:00
committer_date: 2020-11-18T11:35:00
github_id: 259,928,261
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,033
extension: py
content:
# https://leetcode.com/problems/multiply-strings/
# given two non-negative integers num1 and num2 represented
# as strings, return the product of num1 and num2, also
# represented as a string.
def multiply(num1: str, num2: str) -> str:
if num1 == "0" or num2 == "0":
return "0"
table = {"1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6":6, "7": 7,
"8": 8, "9": 9, "0": 0}
n_1, n_2 = 0, 0
l1, l2 = len(num1), len(num2)
for i in range(l1):
n_1 += table[num1[i]] * 10 ** (l1 - i - 1)
for i in range(l2):
n_2 += table[num2[i]] * 10 ** (l2 - i - 1)
result = n_1 * n_2
num3 = ""
while result > 0:
# for key, value in table.items():
# if result % 10 == value:
# num3 = key + num3
num3 = chr(48 + result % 10) + num3
result //= 10
return num3
str_1 = "12312312312312312312313123123123123123123123123123123123"
str_2 = "13213213213213213213232132132132132132132132132132132132"
print(multiply(str_1, str_2))
authors: ["dbialon79@outlook.com"]
author_id: dbialon79@outlook.com
---
blob_id: be74904cdc25d4d4c7cf0e60588433a0cca13e45
directory_id: c7cbbd4b1c1e281cef5f4a0c4e3d4a97cee2241e
path: /froide/foirequest/feeds.py
content_id: f7f5013a62598bd972ecd4613d1f94601890043d
detected_licenses: ["MIT"]
license_type: permissive
repo_name: manonthemat/froide
snapshot_id: 078cf78a6eb35226512c0bdfa2ac9043bcc81ad9
revision_id: 698c49935eaf2e922f3c9f6a46af0fd545ccbbbb
branch_name: refs/heads/master
visit_date: 2020-08-14T08:19:36.215473
revision_date: 2019-10-14T19:43:16
committer_date: 2019-10-14T19:43:16
github_id: 215,129,869
star_events_count: 0
fork_events_count: 0
gha_license_id: MIT
gha_event_created_at: 2019-10-14T19:35:49
gha_created_at: 2019-10-14T19:35:49
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,189
extension: py
content:
import re
from django.conf import settings
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
from django.shortcuts import get_object_or_404
from .models import FoiRequest
from .filters import FOIREQUEST_FILTER_DICT
CONTROLCHARS_RE = re.compile(r'[\x00-\x08\x0B-\x0C\x0E-\x1F]')
def clean(val):
return CONTROLCHARS_RE.sub('', val)
class LatestFoiRequestsFeed(Feed):
url_name = 'foirequest-list_feed'
def __init__(self, items, data, make_url):
self.items = items
self.data = data
self.make_url = make_url
super(LatestFoiRequestsFeed, self).__init__()
def get_filter_string(self):
by = []
if self.data.get('q'):
by.append(_('search for "%s"' % self.data['q']))
if self.data.get('category'):
by.append(_('by category %(category)s') % {'category': self.data['category'].name})
if self.data.get('status'):
by.append(_('by status %(status)s') % {
'status': FOIREQUEST_FILTER_DICT[self.data['status']][1]
})
if self.data.get('tag'):
by.append(_('by tag %(tag)s') % {'tag': self.data['tag'].name})
if self.data.get('jurisdiction'):
by.append(_('for %(juris)s') % {'juris': self.data['jurisdiction'].name})
if self.data.get('publicbody'):
by.append(_('to %(publicbody)s') % {'publicbody': self.data['publicbody'].name})
return ' '.join(str(x) for x in by)
def title(self, obj):
by = self.get_filter_string()
if by:
return clean(_("Freedom of Information Requests %(by)s on %(sitename)s") % {
"sitename": settings.SITE_NAME,
'by': by
})
return clean(_("Freedom of Information Requests on %(sitename)s") % {
"sitename": settings.SITE_NAME
})
def description(self, obj):
by = self.get_filter_string()
if by:
return clean(_("This feed contains the Freedom of Information requests %(by)s"
" that have been made through %(sitename)s.") % {
"sitename": settings.SITE_NAME,
'by': by
})
return clean(_("This feed contains the latest Freedom of Information requests"
" that have been made through %(sitename)s.") % {
"sitename": settings.SITE_NAME
})
def link(self):
return self.make_url(self.url_name)
def items(self):
return self.items.order_by("-first_message")[:15]
def item_title(self, item):
if item.public_body:
pb_name = item.public_body.name
else:
pb_name = _("Not yet known")
return clean(_("'%(title)s' to %(publicbody)s") % {
"title": item.title,
"publicbody": pb_name
})
def item_description(self, item):
return clean(item.description)
def item_pubdate(self, item):
return item.first_message
class LatestFoiRequestsFeedAtom(LatestFoiRequestsFeed):
feed_type = Atom1Feed
subtitle = LatestFoiRequestsFeed.description
url_name = 'foirequest-list_feed_atom'
class FoiRequestFeed(Feed):
def get_object(self, request, slug):
return get_object_or_404(FoiRequest, slug=slug, public=True)
def title(self, obj):
return clean(obj.title)
def link(self, obj):
return reverse('foirequest-feed', kwargs={"slug": obj.slug})
def description(self, obj):
return clean(obj.description)
def items(self, obj):
return obj.foievent_set.order_by("-timestamp")[:15]
def item_title(self, item):
return clean(item.as_text())
def item_description(self, item):
return clean(item.as_text())
def item_pubdate(self, item):
return item.timestamp
class FoiRequestFeedAtom(FoiRequestFeed):
feed_type = Atom1Feed
subtitle = FoiRequestFeed.description
def link(self, obj):
return reverse('foirequest-feed_atom', kwargs={"slug": obj.slug})
|
[
"mail@stefanwehrmeyer.com"
] |
mail@stefanwehrmeyer.com
|
e68fc4550c477bbbfcb9e72f120f12025db3050e
|
648411bd760b9c7018da516b4617b05a0d3a5dee
|
/core/config.py
|
15bb43be748390910230122b73bbe541489e7487
|
[
"Apache-2.0"
] |
permissive
|
cartel32/ChefAPI
|
6de457a4e4f7fd0a2123da1c97058565ccdcdc34
|
bb622db1fb09feb85488438a6d840e0b6383b679
|
refs/heads/main
| 2023-06-09T15:09:27.923326
| 2021-06-26T02:03:38
| 2021-06-26T02:03:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,529
|
py
|
import secrets
from typing import Any, Dict, List, Optional

from pydantic import AnyHttpUrl, BaseSettings, Field, PostgresDsn, validator
class Settings(BaseSettings):
    API_V1_STR: str = "/api/v1"
SECRET_KEY: str = secrets.token_urlsafe(64)
ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8
BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []
PROJECT_NAME: str = "ChefAPI"
# BACKEND_CORS_ORIGINS is a JSON-formatted list of origins
# e.g: '["http://localhost", "http://localhost:4200", "http://localhost:3000", \
# "http://localhost:8080", "http://local.dockertoolbox.tiangolo.com"]'
POSTGRES_SERVER: str = Field(..., env="POSTGRES_SERVER")
POSTGRES_USER: str = Field(..., env="POSTGRES_USER")
POSTGRES_PASSWORD: str = Field(..., env="POSTGRES_PASSWORD")
POSTGRES_DB: str = Field(..., env="POSTGRES_DB")
SQLALCHEMY_DATABASE_URI: Optional[PostgresDsn] = None
@validator("SQLALCHEMY_DATABASE_URI", pre=True)
def assemble_db_connection(cls, v: Optional[str], values: Dict[str, Any]) -> Any:
if isinstance(v, str):
return v
return PostgresDsn.build(
scheme="postgresql",
user=values.get("POSTGRES_USER"),
password=values.get("POSTGRES_PASSWORD"),
host=values.get("POSTGRES_SERVER"),
path=f"/{values.get('POSTGRES_DB') or ''}",
)
class Config:
case_sensitive = True
env_file = '.env'
env_file_encoding = 'utf-8'
settings = Settings()
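
# Minimal usage sketch (an assumption, not part of the original module): the
# POSTGRES_* variables must be supplied via the environment or the .env file,
# otherwise instantiating Settings above raises a pydantic ValidationError.
if __name__ == "__main__":
    print(settings.PROJECT_NAME)
    print(settings.SQLALCHEMY_DATABASE_URI)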
|
[
"yasserth19@gmail.com"
] |
yasserth19@gmail.com
|
d020b391aa17fab9770e8607e005440b02e93769
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Keras_tensorflow_nightly/source2.7/google/protobuf/internal/factory_test1_pb2.py
|
8240f242a12b6b761056dc5e7317920be7dc0c1c
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| true
| 7,816
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/factory_test1.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/internal/factory_test1.proto',
package='google.protobuf.python.internal',
syntax='proto2',
serialized_pb=_b('\n,google/protobuf/internal/factory_test1.proto\x12\x1fgoogle.protobuf.python.internal\"\xd5\x03\n\x0f\x46\x61\x63tory1Message\x12\x45\n\x0e\x66\x61\x63tory_1_enum\x18\x01 \x01(\x0e\x32-.google.protobuf.python.internal.Factory1Enum\x12\x62\n\x15nested_factory_1_enum\x18\x02 \x01(\x0e\x32\x43.google.protobuf.python.internal.Factory1Message.NestedFactory1Enum\x12h\n\x18nested_factory_1_message\x18\x03 \x01(\x0b\x32\x46.google.protobuf.python.internal.Factory1Message.NestedFactory1Message\x12\x14\n\x0cscalar_value\x18\x04 \x01(\x05\x12\x12\n\nlist_value\x18\x05 \x03(\t\x1a&\n\x15NestedFactory1Message\x12\r\n\x05value\x18\x01 \x01(\t\"P\n\x12NestedFactory1Enum\x12\x1c\n\x18NESTED_FACTORY_1_VALUE_0\x10\x00\x12\x1c\n\x18NESTED_FACTORY_1_VALUE_1\x10\x01*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02*<\n\x0c\x46\x61\x63tory1Enum\x12\x15\n\x11\x46\x41\x43TORY_1_VALUE_0\x10\x00\x12\x15\n\x11\x46\x41\x43TORY_1_VALUE_1\x10\x01')
)
_FACTORY1ENUM = _descriptor.EnumDescriptor(
name='Factory1Enum',
full_name='google.protobuf.python.internal.Factory1Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FACTORY_1_VALUE_0', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FACTORY_1_VALUE_1', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=553,
serialized_end=613,
)
_sym_db.RegisterEnumDescriptor(_FACTORY1ENUM)
Factory1Enum = enum_type_wrapper.EnumTypeWrapper(_FACTORY1ENUM)
FACTORY_1_VALUE_0 = 0
FACTORY_1_VALUE_1 = 1
_FACTORY1MESSAGE_NESTEDFACTORY1ENUM = _descriptor.EnumDescriptor(
name='NestedFactory1Enum',
full_name='google.protobuf.python.internal.Factory1Message.NestedFactory1Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NESTED_FACTORY_1_VALUE_0', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NESTED_FACTORY_1_VALUE_1', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=460,
serialized_end=540,
)
_sym_db.RegisterEnumDescriptor(_FACTORY1MESSAGE_NESTEDFACTORY1ENUM)
_FACTORY1MESSAGE_NESTEDFACTORY1MESSAGE = _descriptor.Descriptor(
name='NestedFactory1Message',
full_name='google.protobuf.python.internal.Factory1Message.NestedFactory1Message',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.python.internal.Factory1Message.NestedFactory1Message.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=420,
serialized_end=458,
)
_FACTORY1MESSAGE = _descriptor.Descriptor(
name='Factory1Message',
full_name='google.protobuf.python.internal.Factory1Message',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='factory_1_enum', full_name='google.protobuf.python.internal.Factory1Message.factory_1_enum', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nested_factory_1_enum', full_name='google.protobuf.python.internal.Factory1Message.nested_factory_1_enum', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nested_factory_1_message', full_name='google.protobuf.python.internal.Factory1Message.nested_factory_1_message', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scalar_value', full_name='google.protobuf.python.internal.Factory1Message.scalar_value', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list_value', full_name='google.protobuf.python.internal.Factory1Message.list_value', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_FACTORY1MESSAGE_NESTEDFACTORY1MESSAGE, ],
enum_types=[
_FACTORY1MESSAGE_NESTEDFACTORY1ENUM,
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000, 536870912), ],
oneofs=[
],
serialized_start=82,
serialized_end=551,
)
_FACTORY1MESSAGE_NESTEDFACTORY1MESSAGE.containing_type = _FACTORY1MESSAGE
_FACTORY1MESSAGE.fields_by_name['factory_1_enum'].enum_type = _FACTORY1ENUM
_FACTORY1MESSAGE.fields_by_name['nested_factory_1_enum'].enum_type = _FACTORY1MESSAGE_NESTEDFACTORY1ENUM
_FACTORY1MESSAGE.fields_by_name['nested_factory_1_message'].message_type = _FACTORY1MESSAGE_NESTEDFACTORY1MESSAGE
_FACTORY1MESSAGE_NESTEDFACTORY1ENUM.containing_type = _FACTORY1MESSAGE
DESCRIPTOR.message_types_by_name['Factory1Message'] = _FACTORY1MESSAGE
DESCRIPTOR.enum_types_by_name['Factory1Enum'] = _FACTORY1ENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Factory1Message = _reflection.GeneratedProtocolMessageType('Factory1Message', (_message.Message,), dict(
NestedFactory1Message = _reflection.GeneratedProtocolMessageType('NestedFactory1Message', (_message.Message,), dict(
DESCRIPTOR = _FACTORY1MESSAGE_NESTEDFACTORY1MESSAGE,
__module__ = 'google.protobuf.internal.factory_test1_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.python.internal.Factory1Message.NestedFactory1Message)
))
,
DESCRIPTOR = _FACTORY1MESSAGE,
__module__ = 'google.protobuf.internal.factory_test1_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.python.internal.Factory1Message)
))
_sym_db.RegisterMessage(Factory1Message)
_sym_db.RegisterMessage(Factory1Message.NestedFactory1Message)
# @@protoc_insertion_point(module_scope)
|
[
"ryfeus@gmail.com"
] |
ryfeus@gmail.com
|
549bed9daf0f4ef8d0ed86767a77ca00430090ef
|
5b70fbd53b534306c146ffb98a0f99d2343a948f
|
/src/Python/Problem587.py
|
2ba944f8d8d9652b2e76c40f120339f18f824584
|
[] |
no_license
|
aniruddhamurali/Project-Euler
|
1f4ff3aa1e9c4efbc2a85026821e19a28b5edf90
|
408b3098fbc98ff3954679602c0468ddb56ea0ac
|
refs/heads/master
| 2020-03-20T23:07:22.178103
| 2018-07-27T01:40:46
| 2018-07-27T01:40:46
| 137,830,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
# Problem 587
# Answer: 2240
import math
import itertools
def main():
# The indefinite integral of (1 - sqrt(2x - x^2)) dx.
def integral(x):
t = x - 1.0
return t - (math.sqrt(x * (2.0 - x)) * t + math.asin(t)) / 2.0
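    # Hedged sanity check: integrating from 0 to 1 gives
    # integral(1.0) - integral(0.0) == 1 - pi/4, which equals the L-section
    # area computed on the next line.
    assert abs((integral(1.0) - integral(0.0)) - (1.0 - math.pi / 4.0)) < 1e-12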
lsectionarea = 1.0 - math.pi/4.0
for i in itertools.count(1):
slope = 1.0 / i
a = slope**2 + 1.0
b = -2.0 * (slope + 1.0)
c = 1.0
x = (2.0 * c) / (-b + math.sqrt(b * b - 4 * a * c))
concavetrianglearea = (x**2 * slope / 2) + (integral(1.0) - integral(x))
if concavetrianglearea/lsectionarea < 0.001:
print(i)
return str(i)
main()
|
[
"aniruddha.murali@gmail.com"
] |
aniruddha.murali@gmail.com
|
f8772b8d3610eba283c33f7c31d167ed869482c9
|
7f29e2c30e047ec59fb104d4f0953a8a8bbead51
|
/rb/complexity/rhythm/usage.py
|
6c65f3db5628a3b48d6c2209ab3f4b4d85d86f6e
|
[
"Apache-2.0"
] |
permissive
|
rwth-acis/readerbenchpy
|
69597dc1f2d500aea2be49981799aa20a0e6ea68
|
1a070ae678f58ccd6f358c0802bdf0b3b3dde9d3
|
refs/heads/main
| 2023-07-17T11:57:25.507494
| 2021-09-03T12:49:10
| 2021-09-03T12:49:10
| 348,343,445
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,049
|
py
|
from rb.core.document import Document
from rb.core.lang import Lang
from rb.core.text_element_type import TextElementType
from rb.complexity.measure_function import MeasureFunction
from rb.complexity.rhythm.no_alliterations import NoAlliterations
from rb.complexity.rhythm.no_assonances import NoAssonances
# import nltk
# nltk.download()
txt1 = """
This is a sample document.
It can contain, multiple sentences and paragraphs and repeating sentences.
This is considered a new block (paragraph).
Therefore in total are 3 blocks.
"""
txt2 = """
S-a născut repede la 1 februarie 1852,[3] în satul Haimanale (care astăzi îi poartă numele), fiind primul născut al lui Luca Ștefan Caragiale și al Ecaterinei Chiriac Karaboas. Conform unor surse, familia sa ar fi fost de origine aromână.[6] Tatăl său, Luca (1812 - 1870), și frații acestuia, Costache și Iorgu, s-au născut la Constantinopol,
fiind fiii lui Ștefan, un bucătar angajat la sfârșitul anului 1812 de Ioan Vodă Caragea în suita sa.
"""
alliteration1 = """
Greedy goats gobbled up gooseberries, getting good at grabbing the goodies.
"""
alliteration2 = """
Up the aisle the moans and screams merged with the sickening smell of woolen black clothes worn in summer weather and green leaves wilting over yellow flowers.
"""
alliteration3 = """
When the canary keeled over the coal miners left the cave.
"""
alliteration4 = """
I forgot my flip phone but felt free.
"""
assonance1 = """
That solitude which suits abstruser musings.
"""
assonance2 = """
I must confess that in my quest I felt depressed and restless.
"""
# element = Document(Lang.EN, alliteration2)
# index = NoAlliterations(element.lang, TextElementType.SENT.value, None)
# index.process(element)
# element = Document(Lang.EN, assonance1)
# index = NoAssonances(element.lang, TextElementType.SENT.value, None)
# index.process(element)
element = Document(Lang.RO, txt2)
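
# Hedged sketch mirroring the commented-out English examples above; assumes the
# same index API applies to the Romanian document:
# index = NoAssonances(element.lang, TextElementType.SENT.value, None)
# index.process(element)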
|
[
"karlydiamond214@gmail.com"
] |
karlydiamond214@gmail.com
|
e29ee6ad0af2ac05c635f6f43c64c1d6f67ddf13
|
c9ab605cdd2dbf92c9de05768ade0ecf1718be02
|
/api 활용/rest.py
|
5edba3b8356a963deb8a80da2fff32a437fe3817
|
[] |
no_license
|
PyeongGang-Kim/TIL
|
42d69308cf99d2e07644b51d7636e1b64551a697
|
8711501d131ee7d78fdaac544dda2008adf820a1
|
refs/heads/master
| 2023-01-12T21:10:38.027946
| 2021-10-23T07:19:48
| 2021-10-23T07:19:48
| 195,937,990
| 10
| 1
| null | 2023-01-07T11:25:30
| 2019-07-09T05:22:45
|
HTML
|
UTF-8
|
Python
| false
| false
| 977
|
py
|
import requests, json
def ans():
global data
data["yourAnswer"] = input()
def seturl():
global url
url = urlbase+res.json()['nextUrl']
r = []
data = {
    "nickname": "구미1반김평강",  # player nickname (Korean proper noun, sent to the quiz server as-is)
    "yourAnswer": "1"
}
urlbase = "http://13.125.222.176/quiz/"
idx = "jordan"
url = urlbase+idx
header = {
"Accept": "application/json",
"Content-Type": "application/json"
}
tmp = ''
while 1:
res = requests.post(url, data=json.dumps(data), headers=header)
if res.status_code == 200:
r.append(data["yourAnswer"])
r.append(res.json())
        if res.json()['nextUrl'] == "수고하셨습니다.":  # the server replies "수고하셨습니다." ("good job") when the quiz is finished
break
print(res.json()['question'])
if res.json()['code'] == 600:
            print("오답입니다.")  # "wrong answer" - retry the same question
print(tmp)
ans()
continue
seturl()
ans()
tmp = res.json()['question']
else:
print(res)
ans()
for rr in r:
print(rr)
|
[
"pyeonggangkim@gmail.com"
] |
pyeonggangkim@gmail.com
|
aa790bc34441cd4186ce10350ab783afe7095ad8
|
1edd52cf197e5ae67b5939a3beb3e70761334e62
|
/Notes/Notes/Udemy/Aws_boto3_refresh/Session-5-Waiters/tags1.py
|
81cc86fb506d96deb89eae42df117690eaa4687a
|
[] |
no_license
|
sandeepmchary/Devops_wordpress_Notes
|
bdcd85d526780d03c494ecb93e714e7ffe0a4d58
|
ffd2092162073e1e7342c6066d023d04e6ca8c1c
|
refs/heads/master
| 2022-06-18T21:33:02.471025
| 2022-06-12T11:14:47
| 2022-06-12T11:14:47
| 154,679,658
| 1
| 4
| null | 2022-05-19T16:59:57
| 2018-10-25T13:51:40
|
HTML
|
UTF-8
|
Python
| false
| false
| 517
|
py
|
import boto3
from pprint import pprint
ec2_re = boto3.resource('ec2',)
ec2_cli = boto3.client('ec2',)
response = ec2_cli.describe_tags()
values_list = []
# NOTE: describe_tags() already returns all tags; because the innermost loop
# runs once per instance, values_list ends up with duplicate tag entries when
# more than one instance exists.
for each_in in ec2_cli.describe_instances()['Reservations']:
    for each_res in each_in['Instances']:
        for each_tag in response['Tags']:
            values_list.append(each_tag)
            #print("id:{0}\nState:{1}\nPublic_Dns_name:{2}\ntag:{3}".format(each_res['InstanceId'],each_res['State']['Name'],each_res['PublicDnsName'],each_tag['Value']))
print(values_list[0]['Value'])  # value of the first tag collected
|
[
"awssandeepchary@gmail.com"
] |
awssandeepchary@gmail.com
|
ae83b1bf593af03cf04cf0ad76ea4689908db278
|
e3d061e3692c4003b616dfc9b7655ad7889b5b10
|
/apps/strawberry_fields_listener.py
|
b3c6c3d0731862f381a18fc1bda242ac3335093e
|
[
"Apache-2.0"
] |
permissive
|
nuzumco/blackbird
|
515a84a40e4a98eefb18e7c98cbc358d848f45d9
|
1e78aa210d9c1acfabf12ee12d25b22bafbe51e1
|
refs/heads/master
| 2020-05-18T08:55:42.843764
| 2019-04-26T18:57:56
| 2019-04-26T18:57:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,349
|
py
|
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-return-statements,too-many-branches,too-many-instance-attributes
"""Strawberry Fields Blackbird parser"""
import sys
import antlr4
import numpy as np
import strawberryfields as sf
import strawberryfields.ops as sfo
from blackbird import BlackbirdListener, RegRefTransform, parse
class StrawberryFieldsListener(BlackbirdListener):
"""Listener to run a Blackbird program using Strawberry Fields"""
def __init__(self):
super().__init__()
self.eng = None
self.q = None
self.state = None
self.result = []
def run(self):
if self.target is None:
raise ValueError("Blackbird program has no target backend")
self.eng, self.q = sf.Engine(
len(self.active_modes),
hbar=self.target['options'].get('hbar', 2)
)
self.target['options'].pop('hbar', None)
with self.eng:
for statement in self.queue:
modes = statement['modes']
if 'args' in statement:
args = statement['args']
kwargs = statement['kwargs']
for idx, a in enumerate(args):
if isinstance(a, RegRefTransform):
regrefs = [self.q[i] for i in a.regrefs]
args[idx] = sf.engine.RegRefTransform(regrefs, a.func, a.func_str)
op = getattr(sfo, statement['op'])(*args, **kwargs)
else:
op = getattr(sfo, statement['op'])
op | [self.q[i] for i in modes] #pylint:disable=pointless-statement
shots = self.target['options'].get('shots', 1)
self.target['options'].pop('shots', None)
for _ in range(shots):
self.eng.reset(keep_history=True)
self.state = self.eng.run(self.target['name'], **self.target['options'])
self.result.append([q.val for q in self.q])
def print_results(self):
"""Print the results of the blackbird program execution"""
print('Program')
print('-------')
self.eng.print_applied()
print()
print('Results')
print('-------')
for row in self.result:
print(row)
def run(file):
"""Parse and run a blackbird program using Strawberry Fields.
Args:
file (str): location of the .xbb blackbird file to run
Returns:
list: list of size ``[shots, num_subsystems]``, representing
the measured qumode values for each shot
"""
simulation = parse(antlr4.FileStream(file), listener=StrawberryFieldsListener)
simulation.run()
simulation.print_results()
if __name__ == '__main__':
run(sys.argv[1])
|
[
"josh146@gmail.com"
] |
josh146@gmail.com
|
c8f7dcd011aa4533b5ba8dec9e3a4ed1b8232057
|
12362aa3c315e2b72ed29193ee24e3fd7f1a57db
|
/LeetCode/0780-Reaching Points/main.py
|
0176d04c47bb9471fbfb6eb85d0b18ce9355da7a
|
[] |
no_license
|
PRKKILLER/Algorithm_Practice
|
f2f4662352516965777605ccf116dd7945c4b94a
|
73654b6567fdb282af84a868608929be234075c5
|
refs/heads/master
| 2023-07-03T23:24:15.081892
| 2021-08-09T03:55:12
| 2021-08-09T03:55:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,773
|
py
|
"""
A move consists of taking a point (x, y) and transforming it to either (x, x+y) or (x+y, y).
Given a starting point (sx, sy) and a target point (tx, ty), return True if and only if a sequence of moves
exists to transform the point (sx, sy) to (tx, ty). Otherwise, return False.
Examples:
Input: sx = 1, sy = 1, tx = 3, ty = 5
Output: True
Explanation:
One series of moves that transforms the starting point to the target is:
(1, 1) -> (1, 2)
(1, 2) -> (3, 2)
(3, 2) -> (3, 5)
Input: sx = 1, sy = 1, tx = 2, ty = 2
Output: False
Input: sx = 1, sy = 1, tx = 1, ty = 1
Output: True
"""
class Solution:
"""
1. 方法1: Top-down approach
对于任意moment (x, y), 它有两个选择进行状态转移:
(1) (x, x+y)
(2) (x+y, x)
这就构成了二叉树
"""
def reachingPoints(self, sx, sy, tx, ty):
if sx == tx and sy == ty:
return True
if sx > tx or sy > ty:
return False
return self.reachingPoints(sx, sx + sy, tx, ty) or self.reachingPoints(sx + sy, sy, tx, ty)
"""
2. 方法2: Bottom-up approach
观察可知,对于任意child node, 只有一条路可以reach its parent,eventually to the root of the binary tree
这就意味着,我们不需要从(sx, sy)出发,而是从(tx, ty)出发,一直向上搜索,直到碰到 conditions like: sx >= tx or sy >= ty
"""
def reachingPoints2(self, sx, sy, tx, ty):
while sx < tx and sy < ty:
if tx < ty:
ty %= tx
else:
tx %= ty
if sx == tx and sy <= ty and (ty - sy) % sx == 0:
return True
if sy == ty and sx <= tx and (tx - sx) % sy == 0:
return True
return False
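

# Minimal usage sketch (hypothetical driver, reusing the examples from the
# problem statement above):
if __name__ == "__main__":
    s = Solution()
    print(s.reachingPoints2(1, 1, 3, 5))  # expected: True
    print(s.reachingPoints2(1, 1, 2, 2))  # expected: False
    print(s.reachingPoints2(1, 1, 1, 1))  # expected: True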
|
[
"dw6000@163.com"
] |
dw6000@163.com
|
d9633abec836d4892c3fe68ab2a30b5749261f2b
|
7952f66758b685f4bf045c7eb28efa3a22412a89
|
/백준/1922.py
|
d83d5726dbf2078356865eda4bbcbadf8e1a0325
|
[] |
no_license
|
PingPingE/Algorithm
|
b418fa13528c27840bb220e305933800c5b4c00a
|
89a55309c44320f01d2d6fe5480181a4c5816fd2
|
refs/heads/master
| 2023-08-31T01:43:09.690729
| 2023-08-27T13:12:22
| 2023-08-27T13:12:22
| 172,465,200
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,481
|
py
|
'''
Problem)
Dohyun wants to build a network connecting all of his computers, but he has no
hub, so the computers must be wired to each other directly. For everyone to
share data, every computer has to be connected.
(a and b being "connected" means a path from a to b exists: if a line connects
a and b and another connects b and c, then a and c are connected.)
The wiring cost should be minimized so money is left over for other things.
Given the cost of every possible connection, print the minimum cost of
connecting all the computers. A full connection is always possible.
Input)
Line 1: the number of computers N (1 <= N <= 1000).
Line 2: the number of possible connections M (1 <= M <= 100,000).
Lines 3 to M+2: one connection per line as three integers a b c, meaning that
connecting computer a and computer b costs c (1 <= c <= 10,000). a may equal b.
Output)
Print the minimum cost of connecting all the computers on the first line.
'''
#137432kb 340ms (after adding cnt)
#137820kb 712ms (before adding cnt)
import sys,heapq
def find(x,links): #find the root parent (with path compression)
if links[x] == x:
return x
links[x] = find(links[x], links)
return links[x]
def union(a,b,links):
a = find(a,links)
b = find(b,links)
if a>b:
links[a] = b
else:
links[b] = a
N = int(input())
M = int(input())
links = {i:i for i in range(1,N+1)}
que = []
heapq.heapify(que)
ans = 0
cnt = 0  # count edges added; stop once N-1 edges are in the tree (712ms -> 340ms after adding this)
for _ in range(M):
a,b,c = map(int,sys.stdin.readline().split())
    heapq.heappush(que,[c,a,b])  # cost, computer a, computer b
while que:
cost, from_ , to_ = heapq.heappop(que)
if find(from_,links) == find(to_,links):
continue
else:
union(from_, to_, links)
ans += cost
cnt += 1
if cnt == N-1:break
print(ans)
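
# Hedged example (hypothetical input, not from the problem statement):
# stdin:  3          <- N computers
#         3          <- M candidate connections
#         1 2 1
#         2 3 2
#         1 3 3
# stdout: 3          (edges (1,2) and (2,3) form the minimum spanning tree)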
|
[
"ds03023@gmail.com"
] |
ds03023@gmail.com
|
037faa78b61a3f33646a5ddad2d55b313f992ef0
|
f42863aac5aa5cf41fea07a155b3d24cdec94847
|
/oop/system_split/main.py
|
445d75ab1065151f742ef12ba46fb48c28a93c28
|
[] |
no_license
|
stanislavkozlovski/python_exercises
|
bd03a50fd724b0e084cb816560a69359a228358b
|
6c8e75d692c92f15d42e71cbf6a9f8da6bf802cb
|
refs/heads/master
| 2023-05-15T00:19:48.305862
| 2019-04-17T13:49:34
| 2019-04-17T13:49:34
| 64,744,975
| 3
| 2
| null | 2021-06-11T17:43:53
| 2016-08-02T09:48:16
|
Python
|
UTF-8
|
Python
| false
| false
| 4,944
|
py
|
import re
from hardware import PowerHardware, HeavyHardware
from software import ExpressSoftwareComponent, LightSoftwareComponent
class TheSystem:
def __init__(self):
self.hardwares = {}
def register_power_hardware(self, name, capacity, memory):
self.hardwares[name] = PowerHardware(name, capacity, memory)
def register_heavy_hardware(self, name, capacity, memory):
self.hardwares[name] = HeavyHardware(name, capacity, memory)
def register_express_software(self, hardware_component_name, name, capacity, memory):
if hardware_component_name not in self.hardwares:
return
soft_comp = ExpressSoftwareComponent(name, capacity, memory)
self.hardwares[hardware_component_name].register_software_component(soft_comp)
def register_light_software(self, hardware_component_name, name, capacity, memory):
if hardware_component_name not in self.hardwares:
return
soft_comp = LightSoftwareComponent(name, capacity, memory)
self.hardwares[hardware_component_name].register_software_component(soft_comp)
def release_software_component(self, hardware_component_name, software_component_name):
if hardware_component_name not in self.hardwares:
return False
self.hardwares[hardware_component_name].release_software_component(software_component_name)
return True
def analyze(self):
print(f"""Software Components
Hardware Components: {len(self.hardwares)}
Software Components: {sum(len(hardware.software_components) for hardware in self.hardwares.values())}
Total Operational Memory: {sum(hardware.used_memory for hardware in self.hardwares.values())} / {sum(hardware.max_memory for hardware in self.hardwares.values())}
Total Capacity Taken: {sum(hardware.used_capacity for hardware in self.hardwares.values())} / {sum(hardware.max_capacity for hardware in self.hardwares.values())}""")
def split(self):
ordered_hardware_components = ([pow_comp for pow_comp in self.hardwares.values()
if isinstance(pow_comp, PowerHardware)]
+ [heavy_comp for heavy_comp in self.hardwares.values()
if isinstance(heavy_comp, HeavyHardware)])
for comp in ordered_hardware_components:
express_soft_count = len([None for soft_comp in comp.software_components.values()
if isinstance(soft_comp, ExpressSoftwareComponent)])
light_soft_count = len([None for light_comp in comp.software_components.values()
if isinstance(light_comp, LightSoftwareComponent)])
print(f'Hardware Component - {comp.name}')
print(f'Express Software Components: {express_soft_count}')
print(f'Light Software Components: {light_soft_count}')
print(f'Memory Usage: {comp.used_memory} / {comp.max_memory}')
print(f'Capacity Usage: {comp.used_capacity} / {comp.max_capacity}')
print(f'Type: {"Power" if isinstance(comp, PowerHardware) else "Heavy"}')
print(f'Software Components {{{", ".join([s_comp.name for s_comp in comp.software_components.values()]) or "None"}}}')
def main():
re_pattern = r'.+\((?P<args>.+)\)'
command = input()
system = TheSystem()
while command != 'System Split':
if command.startswith('RegisterPowerHardware') or command.startswith('RegisterHeavyHardware'):
args = re.match(re_pattern, command).group('args').split(', ')
name = args[0]
capacity = int(args[1])
memory = int(args[2])
if command.startswith('RegisterPowerHardware'):
system.register_power_hardware(name, capacity, memory)
else:
system.register_heavy_hardware(name, capacity, memory)
elif command.startswith('RegisterLightSoftware') or command.startswith('RegisterExpressSoftware'):
args = re.match(re_pattern, command).group('args').split(', ')
hardware_name = args[0]
name = args[1]
capacity = int(args[2])
memory = int(args[3])
if command.startswith('RegisterLightSoftware'):
system.register_light_software(hardware_name, name, capacity, memory)
else:
system.register_express_software(hardware_name, name, capacity, memory)
elif command.startswith('Analyze'):
system.analyze()
elif command.startswith('ReleaseSoftwareComponent'):
args = re.match(re_pattern, command).group('args').split(', ')
hardware_name = args[0]
software_name = args[1]
system.release_software_component(hardware_name, software_name)
command = input()
system.split()
if __name__ == '__main__':
main()
|
[
"familyguyuser192@windowslive.com"
] |
familyguyuser192@windowslive.com
|
b8db621f5d1e3979942cd6b63d3b82486aabdd35
|
ba602dc67ad7bb50133aeb312f3c6c54627b3dec
|
/data/3943/WA_py/518940.py
|
3b0b14e911ecd75205e252df366bd906589fb946
|
[] |
no_license
|
Dearyyyyy/TCG
|
0d21d89275906157372d775f33309ce337e6bc95
|
7b80de16de2d3f5d95a7c4ed95d45a9e38882e67
|
refs/heads/master
| 2020-12-27T23:19:44.845918
| 2020-02-04T01:59:23
| 2020-02-04T01:59:23
| 238,101,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 633
|
py
|
# coding=utf-8
q = []

def order(n, begin, end):
    global q
    if begin >= end:
        q += n
    else:
        i = begin
        for num in range(begin, end):
            n[num], n[i] = n[i], n[num]
            order(n, begin + 1, end)
            n[num], n[i] = n[i], n[num]

while True:
    n = int(input())
    if n == 0:
        break
    q = []  # reset between datasets; without this, permutations leak across test cases
    list1 = []
    for i in range(1, n + 1):
        list1.append(i)
    order(list1, 0, n)
    list2 = []
    temp = 1
    for h in range(1, n + 1):
        temp *= h
    for j in range(0, temp):
        list2.append(q[j * n:j * n + n])
    s = sorted(list2)
    for r in s:
        for c in r:
            print(c, end=' ')
        print()
|
[
"543271544@qq.com"
] |
543271544@qq.com
|
e7eef688a513394c0ad13daa03a804f5f7b78d13
|
4610d0284416361643095ca9c3f404ad82ca63c2
|
/src/sploitego/xmltools/__init__.py
|
794eae10734f7233002fc04e4d2c183fdddf298b
|
[] |
no_license
|
mshelton/sploitego
|
165a32874d955621c857552fb9692ecf79e77b7e
|
3944451a110f851a626459767d114569d80a158c
|
refs/heads/master
| 2020-12-25T03:11:58.071280
| 2012-08-16T22:33:10
| 2012-08-16T22:33:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
#!/usr/bin/env python
__author__ = 'Nadeem Douba'
__copyright__ = 'Copyright 2012, Sploitego Project'
__credits__ = ['Nadeem Douba']
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'Nadeem Douba'
__email__ = 'ndouba@gmail.com'
__status__ = 'Development'
__all__ = [
'objectify',
'oxml'
]
|
[
"ndouba@gmail.com"
] |
ndouba@gmail.com
|
ad6e4a658bf93bbdaa02f01a60c6f4f8f44bb105
|
8f9b10acd4b7b8c94ee65c21e6bdb06555d8f669
|
/anti/migrations/0001_initial.py
|
a78244a7828ec5d6f51faee450d03d6a3de23072
|
[] |
no_license
|
leezichanga/unmask-corruption
|
d848d814118ab785482b57a2c5fcebd5067c2183
|
231cdb517a2c5a0c2a8bd2a0d9cbec9349106a56
|
refs/heads/master
| 2020-03-18T23:26:41.418951
| 2018-06-04T12:59:28
| 2018-06-04T12:59:28
| 135,403,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-03 10:32
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='categories',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='report',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=60)),
('location', models.CharField(max_length=60)),
('description', models.TextField()),
('email', models.EmailField(max_length=254)),
('time_uploaded', models.DateTimeField(auto_now_add=True, null=True)),
('report', models.TextField()),
('category', models.ManyToManyField(to='anti.categories')),
('editor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-time_uploaded'],
},
),
]
|
[
"elizabbethichanga@yahoo.com"
] |
elizabbethichanga@yahoo.com
|
a88be202976fb580308f95fcb529d4d3e32b61d1
|
1b764845ceab76ab91d12a4a067cb49fa3296001
|
/interface testing/tests/test_laGou.py
|
c3ed52590ca07ee3d1a11b689e4723a7cfee59d2
|
[
"Apache-2.0"
] |
permissive
|
mychristopher/test
|
c5e11aef178d025d25d54afde4fb836a18001a23
|
9977d36bab3fcc47f0e1dd42bbf5a99b39112a2f
|
refs/heads/master
| 2023-07-31T14:58:22.303817
| 2020-09-05T04:26:07
| 2020-09-05T04:26:07
| 276,136,931
| 0
| 0
|
Apache-2.0
| 2023-07-14T16:39:16
| 2020-06-30T15:21:29
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
#!/use/bin/env python
#coding:utf-8
#Author:WuYa
import unittest
import json
from base.method import Method,IsContent
from page.laGou import *
from utils.public import *
from utils.operationExcel import OperationExcel
from utils.operationJson import OperationJson
class LaGou(unittest.TestCase):
def setUp(self):
self.obj=Method()
self.p=IsContent()
self.execl=OperationExcel()
self.operationJson=OperationJson()
def statusCode(self,r):
self.assertEqual(r.status_code, 200)
self.assertEqual(r.json()['code'], 0)
def isContent(self,r,row):
self.statusCode(r=r)
self.assertTrue(self.p.isContent(row=row,str2=r.text))
def test_laGou_001(self):
'''拉钩:测试翻页'''
r = self.obj.post(row=1,data=self.operationJson.getRequestsData(1))
self.isContent(r=r,row=1)
self.execl.writeResult(1,'pass')
def test_laGou_002(self):
        '''LaGou: test keyword job search'''
r =self.obj.post(row=1,data=setSo('Python开发工程师'))
list1=[]
for i in range(0,15):
positionId=r.json()['content']['positionResult']['result'][i]['positionId']
list1.append(positionId)
writePositionId(json.dumps(list1))
def test_lgGou_003(self):
        '''Visit the detail page of every job found by the search'''
for i in range(15):
r=self.obj.get(url=getUrl()[i])
self.assertTrue(self.p.isContent(2,r.text))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"1366254420@qq.com"
] |
1366254420@qq.com
|
b96ea4e4315057da353749698369e86cf59a095c
|
601533bee6393d550fdf60e66599d9c69fb31a68
|
/tools/nvme_utils/configshell-fb-1.1.fb18/setup.py
|
cb4827ef1b1ab44c478077dbde709c53cba74c99
|
[
"Apache-2.0"
] |
permissive
|
truenas/chelsiouwire
|
7043978301e0e28be6c39ac39bbf483af370464f
|
4739d295d543fc8164c96356d4cb1ed63e091a90
|
refs/heads/master
| 2023-08-21T16:54:22.911750
| 2022-07-13T18:26:48
| 2022-07-13T18:26:48
| 383,461,654
| 1
| 0
| null | 2022-07-13T18:29:13
| 2021-07-06T12:30:41
|
C
|
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
#! /usr/bin/env python
'''
This file is part of ConfigShell.
Copyright (c) 2011-2013 by Datera, Inc
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
from setuptools import setup
setup(
name = 'configshell-fb',
version = '1.1.18',
description = 'A framework to implement simple but nice CLIs.',
license = 'Apache 2.0',
maintainer = 'Andy Grover',
maintainer_email = 'agrover@redhat.com',
url = 'http://github.com/agrover/configshell-fb',
packages = ['configshell', 'configshell_fb'],
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
],
)
|
[
"waqarahmedjoyia@live.com"
] |
waqarahmedjoyia@live.com
|
73947b723523e74ea45af363ab25a8f80eeaccd7
|
bbc1001ec110c7cd0bf873bcff8519f8e713b42e
|
/dy/asgi.py
|
c700249231a0301e1efd89aab2d973a8372f4911
|
[] |
no_license
|
zamanehsani/dy
|
b141f98ab52bcdc21bfec593fe971b054e2f74f6
|
675915430e20f5c137f00016d99946f5cd814aa9
|
refs/heads/master
| 2023-03-16T18:53:38.416480
| 2021-03-05T14:13:45
| 2021-03-05T14:13:45
| 324,432,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
"""
ASGI config for dy project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dy.settings')
application = get_asgi_application()
|
[
"zamanehsani@gmail.com"
] |
zamanehsani@gmail.com
|
3c2631144ed941c39aba487fac120978f680355c
|
183e4126b2fdb9c4276a504ff3ace42f4fbcdb16
|
/I семестр/Програмування (Python)/Лабораторні/Братун 6305/Labs/LABA7/counter/counter_2.py
|
f72f5c76c3f4a527bff955d3e92c75781b03a0c9
|
[] |
no_license
|
Computer-engineering-FICT/Computer-engineering-FICT
|
ab625e2ca421af8bcaff74f0d37ac1f7d363f203
|
80b64b43d2254e15338060aa4a6d946e8bd43424
|
refs/heads/master
| 2023-08-10T08:02:34.873229
| 2019-06-22T22:06:19
| 2019-06-22T22:06:19
| 193,206,403
| 3
| 0
| null | 2023-07-22T09:01:05
| 2019-06-22T07:41:22
|
HTML
|
UTF-8
|
Python
| false
| false
| 554
|
py
|
import os

os.chdir(r'C:\lab7\Bratun')  # raw string avoids invalid escape sequences like '\l'

def get_line():
    with open(r'v5.txt', "r", encoding="cp1251") as f:
        tmp = f.readlines()  # the with block closes the file automatically
    return tmp

def change():
    core = round(len(fileinline) / 2)
    for i in range(core):
        fileinline[i], fileinline[-i - 1] = fileinline[-i - 1], fileinline[i]
    with open(r'v5.txt', "w", encoding="utf-8") as f:
        for i in fileinline:
            f.write(i)

fileinline = get_line()
change()
os.rename(r'v5.txt', r'v51.txt')
|
[
"mazanyan027@gmail.com"
] |
mazanyan027@gmail.com
|
fa9d0ee477d8f85e5679caa53d6217ad43c68753
|
6cd3de9d6aa0c52602010aa857966d5dc4d57442
|
/mlprodict/onnxrt/validate/data/__init__.py
|
68432ec182456d8c49ed21a9d675ee84cce8bc08
|
[
"MIT"
] |
permissive
|
xadupre/mlprodict
|
2307ca96eafeeafff08d5322184399bb5dc1c37e
|
f82c8a26a60104948c67849b1c4af95ca812c153
|
refs/heads/master
| 2022-12-10T18:50:36.953032
| 2020-09-03T08:53:58
| 2020-09-03T08:53:58
| 292,824,744
| 1
| 0
|
NOASSERTION
| 2020-09-04T10:56:45
| 2020-09-04T10:56:44
| null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
"""
@file
@brief Datasets to tests models.
"""
import os
from pandas import read_csv
def load_audit():
"""
Use to test conversion of
:epkg:`sklearn:ensemble:GradientBoostingClassifier`
into :epkg:`ONNX`.
.. runpython::
:showcode:
from mlprodict.onnxrt.validate.data import load_audit
df = load_audit()
print(df.head())
"""
name = os.path.dirname(__file__)
name = os.path.join(name, 'audit.csv')
df = read_csv(name).drop(['ID', 'index'], axis=1, inplace=False).dropna()
return df
|
[
"xavier.dupre@gmail.com"
] |
xavier.dupre@gmail.com
|
518d213b2c92fa19522c41e572a3645a8ff0e7a6
|
3a9f2b3d79cf214704829427ee280f4b49dca70a
|
/saigon/rat/RuckusAutoTest/tests/fm/FM_ManageUsers.py
|
ce9d8d8d13ce6dfefd8d1b6b16a7cc23900bf55b
|
[] |
no_license
|
jichunwei/MyGitHub-1
|
ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791
|
f826fc89a030c6c4e08052d2d43af0b1b4b410e3
|
refs/heads/master
| 2021-01-21T10:19:22.900905
| 2016-08-20T03:34:52
| 2016-08-20T03:34:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,994
|
py
|
'''
Testsuite
1.1.10.2 Administration - Users
1.1.10.2.4 Create many user(over 100 user account)
MODELING FOR NORMAL CASES
-------------------------
Inputs
- totalUsers
Internal Variables
- name_prefix: likes 'user_'
Expected Results
- able to create more than a hundred of users
Testscript
+ Config:
- Initialize the input config
- Delete all current (delete-able) users
+ Test:
- Create users (by the given totalUsers)
- Go through the list make sure the created users are there
+ Clean up:
- Delete all the users
'''
import os, time, logging, re, random
from datetime import *
from pprint import pprint, pformat
from RuckusAutoTest.common.utils import *
from RuckusAutoTest.models import Test
from RuckusAutoTest.components.RuckusAP import RuckusAP
from RuckusAutoTest.components import Helpers as lib
from RuckusAutoTest.tests.fm.lib_FM import *
class FM_ManageUsers(Test):
required_components=['FM', 'APs']
parameter_description = dict(
totalUsers = '',
)
def config(self, conf):
self.errmsg = None
self.aliases = init_aliases(testbed=self.testbed)
self._cfgTestParams(**conf)
logging.info('Delete all delete-able users')
lib.fm.userMgmt.delete_all_users(self.aliases.fm)
def test(self):
self._create_users()
if self.errmsg: return ('FAIL', self.errmsg)
self._testCreatedUsers()
if self.errmsg: return ('FAIL', self.errmsg)
return ('PASS', '')
def cleanup(self):
logging.info('Delete all delete-able users')
lib.fm.userMgmt.delete_all_users(self.aliases.fm)
self.aliases.fm.logout()
def _cfgTestParams(self, **kwa):
self.p = dict(
totalUsers = 11,
prefix = 'user',
roles = ['Network Administrator', 'Group Administrator',
'Group Operator', 'Device Operator'],
)
self.p.update(kwa)
logging.debug('Test Configs:\n%s' % pformat(self.p))
def _generateUsers(self, **kwa):
'''
kwa:
- prefix
- total
- roles
return:
        - pairs like ('user_015', role)
'''
role_len = int(kwa['total'] / len(kwa['roles']))
for i in range(1, kwa['total'] + 1):
role_idx = int(i/role_len)
if role_idx >= len(kwa['roles']):
role_idx = len(kwa['roles']) - 1
yield '%s_%03i' % (kwa['prefix'], i), kwa['roles'][role_idx]
def _create_users(self):
'''
return:
. self.accounts: a list of (name, role)
'''
self.accounts = []
logging.info('Creating Users: prefix=%s, totalUsers=%s' % \
(self.p['prefix'], self.p['totalUsers']))
for name, role in self._generateUsers(prefix=self.p['prefix'],
total=self.p['totalUsers'],
roles=self.p['roles']):
self.accounts.append((name, role))
lib.fm.userMgmt.add_user(self.aliases.fm,
username=name,
password=name,
role=role)
def _testCreatedUsers(self):
logging.info('Test the list of created users')
allAccs = lib.fm.userMgmt.get_all_users(self.aliases.fm)
for a in self.accounts:
if not self._find_user(user=a, list=allAccs):
self.errmsg = 'User created but not found: (name=%s, role=%s)' % a
return
def _find_user(self, **kwa):
'''
kwa:
- user: (name, role)
- list: the list of users from FM Users table
'''
for i in kwa['list']:
if i['username'] == kwa['user'][0]:
if i['userrole'] == kwa['user'][1]:
return True
return False # same user name, but different role
return False
|
[
"tan@xx.com"
] |
tan@xx.com
|
f494dcb113132af32c65f8eef89c23905b98c3fe
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_150/ch14_2020_03_16_15_58_26_897959.py
|
886c16b139252dc0fd5ccfdbc257e43fc08191ee
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
import math

# Projectile range with launch height:
#   d = (v^2 / (2*g)) * (1 + sqrt(1 + 2*g*y0 / (v^2 * sin(θ)^2))) * sin(2*θ)
def calcula_distancia_do_projetil(v, θ, y0, g):
    parte1 = (v**2) / (2*g)  # fixed: (v**2)/2*g multiplied by g instead of dividing by 2g
    parte2 = (1 + (1 + (2*g*y0) / (v**2 * (math.sin(θ))**2))**(1/2))
    parte3 = math.sin(2*θ)
    return parte1 * parte2 * parte3
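
# Hedged example (hypothetical values): launched at 10 m/s and 45 degrees from
# ground level (y0 = 0) with g = 9.8, the range is v**2 * sin(2θ) / g ≈ 10.2 m.
print(calcula_distancia_do_projetil(10, math.pi / 4, 0, 9.8))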
|
[
"you@example.com"
] |
you@example.com
|
081bf346fd7fa7c71ec3a2819deb97613cfcba02
|
e37a1aab1dde2339c7e34ac7e7a158767fcd9023
|
/Solutions/solution_018.py
|
61dd1f757ab7cd414491e152fa2e956eceff3651
|
[
"WTFPL"
] |
permissive
|
idayat092/Project-Euler
|
f90ffa71e12a80b6f54de4f469e898fedc9877fe
|
8fcd0d4cb126612d238516e88778a4bfd685ccf2
|
refs/heads/master
| 2022-03-07T18:21:38.103277
| 2019-11-09T05:30:01
| 2019-11-09T05:30:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,436
|
py
|
"""Problem 18: Maximum path sum I
By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.
3
7 4
2 4 6
8 5 9 3
That is, 3 + 7 + 4 + 9 = 23.
Find the maximum total from top to bottom of the triangle below:
NOTE: As there are only 16384 routes, it is possible to solve this problem by trying every route. However, Problem 67, is the same challenge with a triangle containing one-hundred rows; it cannot be solved by brute force, and requires a clever method! ;o)"""
triangle = [
[75],
[95, 64],
[17, 47, 82],
[18, 35, 87, 10],
[20, 4, 82, 47, 65],
[19, 1, 23, 75, 3, 34],
[88, 2, 77, 73, 7, 63, 67],
[99, 65, 4, 28, 6, 16, 70, 92],
[41, 41, 26, 56, 83, 40, 80, 70, 33],
[41, 48, 72, 33, 47, 32, 37, 16, 94, 29],
[53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14],
[70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57],
[91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48],
[63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31],
[4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23]
]
# The original greedy walk (always stepping to the larger neighbour) does not
# maximise the total in general, so fold the triangle bottom-up instead:
# each cell absorbs the better of its two children.
for row in range(len(triangle) - 2, -1, -1):
    for i in range(len(triangle[row])):
        triangle[row][i] += max(triangle[row + 1][i], triangle[row + 1][i + 1])

print(triangle[0][0])
|
[
"solomonghost9@gmail.com"
] |
solomonghost9@gmail.com
|
8edb1fa30b3189a1ec7c551c9d73c662805ee0de
|
ddd002cff6b4668c47a14e740ec780f2c03dcbd9
|
/algorithms/quicksort.py
|
30e026c5c0a83e1f09bff25e8aea55ff0667aaab
|
[] |
no_license
|
wangonya/python_practice
|
53b93bdf93b7a586df75040d4c26d25793b59ed9
|
70f45f950fb6e9619e4054cb1ea5f8d8db988f0e
|
refs/heads/master
| 2020-04-02T07:33:06.647605
| 2018-12-19T07:25:26
| 2018-12-19T07:25:26
| 154,202,248
| 2
| 0
| null | 2018-12-16T05:02:05
| 2018-10-22T19:19:43
|
Python
|
UTF-8
|
Python
| false
| false
| 526
|
py
|
def quickSort(arr):
less = []
pivotList = []
more = []
if len(arr) <= 1:
return arr
else:
pivot = arr[0]
for i in arr:
if i < pivot:
less.append(i)
elif i > pivot:
more.append(i)
else:
pivotList.append(i)
less = quickSort(less)
more = quickSort(more)
return less + pivotList + more
if __name__ == '__main__':
arr = map(int, input().split())
print(quickSort(list(arr)))
|
[
"kwangonya@gmail.com"
] |
kwangonya@gmail.com
|
a956c5437ef70e9646509d41f4eb1a179730eac6
|
ca0c3c1cdfdd714c7780c27fcecd4a2ae39d1474
|
/src/fmf/apps/core/migrations/0001_initial.py
|
19696394a44e5650c93214f629650c85c09d6a98
|
[] |
no_license
|
vasyabigi/fmf
|
fce88a45fb47f3f7652995af40b567ffdf27a4a0
|
988ba668f3ce6da2670b987a1eeae3c87761eac5
|
refs/heads/master
| 2021-01-23T07:29:52.185306
| 2012-08-27T13:11:51
| 2012-08-27T13:11:51
| 2,803,493
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,435
|
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'IndexSliderImage'
db.create_table('core_indexsliderimage', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['flatpages.FlatPage'], unique=True)),
('image', self.gf('sorl.thumbnail.fields.ImageField')(max_length=100)),
('position', self.gf('django.db.models.fields.IntegerField')(default=-1)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('core', ['IndexSliderImage'])
def backwards(self, orm):
# Deleting model 'IndexSliderImage'
db.delete_table('core_indexsliderimage')
models = {
'core.indexsliderimage': {
'Meta': {'ordering': "('position',)", 'object_name': 'IndexSliderImage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'page': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['flatpages.FlatPage']", 'unique': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '-1'})
},
'flatpages.flatpage': {
'Meta': {'ordering': "('url',)", 'object_name': 'FlatPage', 'db_table': "'django_flatpage'"},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'enable_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'registration_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title_uk': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['core']
|
[
"vasyl.stanislavchuk@djangostars.com"
] |
vasyl.stanislavchuk@djangostars.com
|
61a6e75b11db9f3519abe6b262fd42d9f9963545
|
1f63dde39fcc5f8be29f2acb947c41f1b6f1683e
|
/Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/tfprof/python/tools/tfprof/print_model_analysis_test.py
|
c3e9fc9cc099f144f81235a944221fa05b6b398c
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
koobonil/Boss2D
|
09ca948823e0df5a5a53b64a10033c4f3665483a
|
e5eb355b57228a701495f2660f137bd05628c202
|
refs/heads/master
| 2022-10-20T09:02:51.341143
| 2019-07-18T02:13:44
| 2019-07-18T02:13:44
| 105,999,368
| 7
| 2
|
MIT
| 2022-10-04T23:31:12
| 2017-10-06T11:57:07
|
C++
|
UTF-8
|
Python
| false
| false
| 7,110
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""print_model_analysis test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.tools.tfprof import tfprof_options_pb2
from tensorflow.tools.tfprof import tfprof_output_pb2
# XXX: this depends on pywrap_tensorflow and must come later
from tensorflow.contrib.tfprof.python.tools.tfprof import pywrap_tensorflow_print_model_analysis_lib as print_mdl
# pylint: disable=bad-whitespace
# pylint: disable=bad-continuation
TEST_OPTIONS = {
'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'device_regexes': ['.*'],
'order_by': 'name',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['params'],
'output': 'stdout',
}
# pylint: enable=bad-whitespace
# pylint: enable=bad-continuation
class PrintModelAnalysisTest(test.TestCase):
def _BuildSmallModel(self):
image = array_ops.zeros([2, 6, 6, 3])
kernel = variable_scope.get_variable(
'DW', [6, 6, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
return x
def testPrintModelAnalysis(self):
opts = tfprof_options_pb2.OptionsProto()
opts.max_depth = TEST_OPTIONS['max_depth']
opts.min_bytes = TEST_OPTIONS['min_bytes']
opts.min_micros = TEST_OPTIONS['min_micros']
opts.min_params = TEST_OPTIONS['min_params']
opts.min_float_ops = TEST_OPTIONS['min_float_ops']
for p in TEST_OPTIONS['device_regexes']:
opts.device_regexes.append(p)
opts.order_by = TEST_OPTIONS['order_by']
for p in TEST_OPTIONS['account_type_regexes']:
opts.account_type_regexes.append(p)
for p in TEST_OPTIONS['start_name_regexes']:
opts.start_name_regexes.append(p)
for p in TEST_OPTIONS['trim_name_regexes']:
opts.trim_name_regexes.append(p)
for p in TEST_OPTIONS['show_name_regexes']:
opts.show_name_regexes.append(p)
for p in TEST_OPTIONS['hide_name_regexes']:
opts.hide_name_regexes.append(p)
opts.account_displayed_op_only = TEST_OPTIONS['account_displayed_op_only']
for p in TEST_OPTIONS['select']:
opts.select.append(p)
opts.output = TEST_OPTIONS['output']
with session.Session() as sess, ops.device('/cpu:0'):
_ = self._BuildSmallModel()
tfprof_pb = tfprof_output_pb2.TFGraphNodeProto()
tfprof_pb.ParseFromString(
print_mdl.PrintModelAnalysis(
sess.graph.as_graph_def().SerializeToString(),
b'', b'', b'scope', opts.SerializeToString()))
expected_pb = tfprof_output_pb2.TFGraphNodeProto()
text_format.Merge(r"""name: "_TFProfRoot"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 648
children {
name: "Conv2D"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW"
exec_micros: 0
requested_bytes: 0
parameters: 648
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 648
children {
name: "DW/Assign"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
children {
name: "DW/Initializer/random_normal"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
children {
name: "DW/Initializer/random_normal/RandomStandardNormal"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/mean"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/mul"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/shape"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/stddev"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/read"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
children {
name: "zeros"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0""", expected_pb)
self.assertEqual(expected_pb, tfprof_pb)
if __name__ == '__main__':
test.main()
|
[
"slacealic@gmail.com"
] |
slacealic@gmail.com
|
eabe3ee069129823ec57dca7dea81d3a8c2f7f0f
|
de64b143a346585f51590bd674e8d13bbc672386
|
/algorithm/2023/0530_743_Network_Delay_Time/Euihyun.py
|
7b37ba25dc97c748f4d9722f0e74732f52b04da8
|
[] |
no_license
|
ai-kmu/etc
|
304ec20f59e4026025abdcbcae21863c80630dcb
|
9c29941e19b7dd2a2037b110dd6e16690e9a0cc2
|
refs/heads/master
| 2023-08-21T16:30:31.149956
| 2023-08-21T16:26:19
| 2023-08-21T16:26:19
| 199,843,899
| 3
| 24
| null | 2023-05-31T09:56:59
| 2019-07-31T11:36:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,766
|
py
|
# Hmm, solving Dijkstra after a long time, I couldn't figure it out... I looked at a solution.
# So don't worry about the review!
import heapq
class Solution(object):
    def networkDelayTime(self, times, n, k):
        # Initialize the graph
        # Represent the graph as an adjacency list
        graph = [[] for _ in range(n+1)]
        for u, v, w in times:
            # Store the edge from u to v with weight w
            graph[u].append((v, w))
        # Initialize the shortest distances
        dist = [float('inf')] * (n+1)
        # The distance to the start node is 0
        dist[k] = 0
        # Initialize the priority queue
        # The priority queue stores (distance, node) pairs
        pq = [(0, k)]
        while pq:
            # Pop the node with the smallest distance from the priority queue
            d, node = heapq.heappop(pq)
            # Skip nodes that have already been processed
            if d > dist[node]:
                continue
            # Iterate over the adjacent nodes
            for neighbor, delay in graph[node]:
                # Distance to the neighbor when passing through the current node
                new_dist = d + delay
                # Found a shorter path
                if new_dist < dist[neighbor]:
                    # Update the shortest distance
                    # and push it onto the priority queue
                    dist[neighbor] = new_dist
                    heapq.heappush(pq, (new_dist, neighbor))
        # The answer is the largest of all shortest distances
        max_delay = max(dist[1:])
        # If some node is unreachable, return -1; otherwise return the max
        if max_delay == float('inf'):
            return -1
        else:
            return max_delay
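# Quick check with the sample from LeetCode 743 (expected answer: 2, the time
# for the signal from node 2 to reach node 4 via node 3):
if __name__ == "__main__":
    print(Solution().networkDelayTime([[2, 1, 1], [2, 3, 1], [3, 4, 1]], 4, 2))  # 2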
|
[
"noreply@github.com"
] |
ai-kmu.noreply@github.com
|
fa5e2514f9cf03fda65eafbc2e83da73caeab49e
|
3abe579ffc36cffef882bd23139c8112dc18b5eb
|
/nodeeditor/node_edge.py
|
7380edde1767614f6725da1367c056ef702d0e36
|
[] |
no_license
|
huazhicai/nodeEditor
|
3663950221a2fa8ee9a9aa9f5718d20722d9ecf4
|
cb9ed8c1e57614c9bc2242b948ce8361d71fa4be
|
refs/heads/master
| 2020-07-06T05:34:55.186396
| 2019-09-02T08:12:38
| 2019-09-02T08:12:38
| 202,908,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,970
|
py
|
from nodeeditor.node_graphics_edge import *
EDGE_TYPE_DIRECT = 1
EDGE_TYPE_BEZIER = 2
DEBUG = False
class Edge(Serializable):
def __init__(self, scene, start_socket=None, end_socket=None, edge_type=EDGE_TYPE_DIRECT):
super().__init__()
self.scene = scene
# default init
self._start_socket = None
self._end_socket = None
self.start_socket = start_socket
self.end_socket = end_socket
self.edge_type = edge_type
self.scene.addEdge(self)
def __str__(self):
return "<Edge %s..%s>" % (hex(id(self))[2:5], hex(id(self))[-3:])
@property
def start_socket(self):
return self._start_socket
@start_socket.setter
def start_socket(self, value):
# if we were assigned to some socket before, delete us from the socket
if self._start_socket is not None:
self._start_socket.removeEdge(self)
# assign new start socket
self._start_socket = value
# addEdge to the Socket class
if self.start_socket is not None:
self.start_socket.addEdge(self)
@property
def end_socket(self):
return self._end_socket
@end_socket.setter
def end_socket(self, value):
# if we were assigned to some socket before, delete us from the socket
if self._end_socket is not None:
self._end_socket.removeEdge(self)
# assign new end socket
self._end_socket = value
# addEdge to the Socket class
if self.end_socket is not None:
self.end_socket.addEdge(self)
@property
def edge_type(self):
return self._edge_type
@edge_type.setter
def edge_type(self, value):
if hasattr(self, 'grEdge') and self.grEdge is not None:
self.scene.grScene.removeItem(self.grEdge)
self._edge_type = value
if self.edge_type == EDGE_TYPE_DIRECT:
self.grEdge = QDMGraphicsEdgeDirect(self)
elif self.edge_type == EDGE_TYPE_BEZIER:
self.grEdge = QDMGraphicsEdgeBezier(self)
else:
self.grEdge = QDMGraphicsEdgeBezier(self)
self.scene.grScene.addItem(self.grEdge)
if self.start_socket is not None:
self.updatePositions()
def updatePositions(self):
source_pos = self.start_socket.getSocketPosition()
source_pos[0] += self.start_socket.node.grNode.pos().x()
source_pos[1] += self.start_socket.node.grNode.pos().y()
self.grEdge.setSource(*source_pos)
if self.end_socket is not None:
end_pos = self.end_socket.getSocketPosition()
end_pos[0] += self.end_socket.node.grNode.pos().x()
end_pos[1] += self.end_socket.node.grNode.pos().y()
self.grEdge.setDestination(*end_pos)
else:
self.grEdge.setDestination(*source_pos)
self.grEdge.update()
def remove_from_sockets(self):
self.end_socket = None
self.start_socket = None
def remove(self):
if DEBUG: print("# Removing Edge", self)
if DEBUG: print(" - remove edge from all sockets")
self.remove_from_sockets()
if DEBUG: print(" - remove grEdge")
self.scene.grScene.removeItem(self.grEdge)
self.grEdge = None
if DEBUG: print(" - remove edge from scene")
try:
self.scene.removeEdge(self)
except ValueError:
pass
if DEBUG: print(" - everything is done.")
def serialize(self):
return OrderedDict([
('id', self.id),
('edge_type', self.edge_type),
('start', self.start_socket.id),
('end', self.end_socket.id),
])
def deserialize(self, data, hashmap={}, restore_id=True):
if restore_id: self.id = data['id']
self.start_socket = hashmap[data['start']]
self.end_socket = hashmap[data['end']]
self.edge_type = data['edge_type']
|
[
"936844218@qq.com"
] |
936844218@qq.com
|
c6a43174111768e064e7f6c251e16fa309e4c6ac
|
057d2d1e2a78fc89851154e87b0b229e1e1f003b
|
/venv/Lib/site-packages/keystoneauth1/tests/unit/k2k_fixtures.py
|
f78cb0ef084ac7c36a3534ff0e66d62666f5d365
|
[
"Apache-2.0"
] |
permissive
|
prasoon-uta/IBM-Cloud-Secure-File-Storage
|
276dcbd143bd50b71121a73bc01c8e04fe3f76b0
|
82a6876316715efbd0b492d0d467dde0ab26a56b
|
refs/heads/master
| 2022-12-13T00:03:31.363281
| 2018-02-22T02:24:11
| 2018-02-22T02:24:11
| 122,420,622
| 0
| 2
|
Apache-2.0
| 2022-12-08T05:15:19
| 2018-02-22T02:26:48
|
Python
|
UTF-8
|
Python
| false
| false
| 5,454
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
UNSCOPED_TOKEN_HEADER = 'UNSCOPED_TOKEN'
UNSCOPED_TOKEN = {
"token": {
"issued_at": "2014-06-09T09:48:59.643406Z",
"extras": {},
"methods": ["token"],
"expires_at": "2014-06-09T10:48:59.643375Z",
"user": {
"OS-FEDERATION": {
"identity_provider": {
"id": "testshib"
},
"protocol": {
"id": "saml2"
},
"groups": [
{"id": "1764fa5cf69a49a4918131de5ce4af9a"}
]
},
"id": "testhib%20user",
"name": "testhib user"
}
}
}
SAML_ENCODING = "<?xml version='1.0' encoding='UTF-8'?>"
TOKEN_SAML_RESPONSE = """
<ns2:Response Destination="http://beta.example.com/Shibboleth.sso/POST/ECP"
ID="8c21de08d2f2435c9acf13e72c982846"
IssueInstant="2015-03-25T14:43:21Z"
Version="2.0">
<saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">
http://keystone.idp/v3/OS-FEDERATION/saml2/idp
</saml:Issuer>
<ns2:Status>
<ns2:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>
</ns2:Status>
<saml:Assertion ID="a5f02efb0bff4044b294b4583c7dfc5d"
IssueInstant="2015-03-25T14:43:21Z" Version="2.0">
<saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">
http://keystone.idp/v3/OS-FEDERATION/saml2/idp</saml:Issuer>
<xmldsig:Signature>
<xmldsig:SignedInfo>
<xmldsig:CanonicalizationMethod
Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
<xmldsig:SignatureMethod
Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
<xmldsig:Reference URI="#a5f02efb0bff4044b294b4583c7dfc5d">
<xmldsig:Transforms>
<xmldsig:Transform
Algorithm="http://www.w3.org/2000/09/xmldsig#
enveloped-signature"/>
<xmldsig:Transform
Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
</xmldsig:Transforms>
<xmldsig:DigestMethod
Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
<xmldsig:DigestValue>
0KH2CxdkfzU+6eiRhTC+mbObUKI=
</xmldsig:DigestValue>
</xmldsig:Reference>
</xmldsig:SignedInfo>
<xmldsig:SignatureValue>
m2jh5gDvX/1k+4uKtbb08CHp2b9UWsLw
</xmldsig:SignatureValue>
<xmldsig:KeyInfo>
<xmldsig:X509Data>
<xmldsig:X509Certificate>...</xmldsig:X509Certificate>
</xmldsig:X509Data>
</xmldsig:KeyInfo>
</xmldsig:Signature>
<saml:Subject>
<saml:NameID>admin</saml:NameID>
<saml:SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<saml:SubjectConfirmationData
NotOnOrAfter="2015-03-25T15:43:21.172385Z"
Recipient="http://beta.example.com/Shibboleth.sso/POST/ECP"/>
</saml:SubjectConfirmation>
</saml:Subject>
<saml:AuthnStatement AuthnInstant="2015-03-25T14:43:21Z"
SessionIndex="9790eb729858456f8a33b7a11f0a637e"
SessionNotOnOrAfter="2015-03-25T15:43:21.172385Z">
<saml:AuthnContext>
<saml:AuthnContextClassRef>
urn:oasis:names:tc:SAML:2.0:ac:classes:Password
</saml:AuthnContextClassRef>
<saml:AuthenticatingAuthority>
http://keystone.idp/v3/OS-FEDERATION/saml2/idp
</saml:AuthenticatingAuthority>
</saml:AuthnContext>
</saml:AuthnStatement>
<saml:AttributeStatement>
<saml:Attribute Name="openstack_user"
NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
<saml:AttributeValue xsi:type="xs:string">admin</saml:AttributeValue>
</saml:Attribute>
<saml:Attribute Name="openstack_roles"
NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
<saml:AttributeValue xsi:type="xs:string">admin</saml:AttributeValue>
</saml:Attribute>
<saml:Attribute Name="openstack_project"
NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">
<saml:AttributeValue xsi:type="xs:string">admin</saml:AttributeValue>
</saml:Attribute>
</saml:AttributeStatement>
</saml:Assertion>
</ns2:Response>
"""
TOKEN_BASED_SAML = ''.join([SAML_ENCODING, TOKEN_SAML_RESPONSE])
ECP_ENVELOPE = """
<ns0:Envelope
xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:ns1="urn:oasis:names:tc:SAML:2.0:profiles:SSO:ecp"
xmlns:ns2="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
xmlns:xmldsig="http://www.w3.org/2000/09/xmldsig#"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ns0:Header>
<ns1:RelayState
ns0:actor="http://schemas.xmlsoap.org/soap/actor/next"
ns0:mustUnderstand="1">
ss:mem:1ddfe8b0f58341a5a840d2e8717b0737
</ns1:RelayState>
</ns0:Header>
<ns0:Body>
{0}
</ns0:Body>
</ns0:Envelope>
""".format(TOKEN_SAML_RESPONSE)
TOKEN_BASED_ECP = ''.join([SAML_ENCODING, ECP_ENVELOPE])
|
[
"prasoon1812@gmail.com"
] |
prasoon1812@gmail.com
|
620722b8e666fc4571d412bce02d149322242609
|
07b41bc2423e5d073ddcd1da47e534d981ccbe78
|
/backend/marjan_fashion_24858/wsgi.py
|
73586b0a2cd945c35d8ed73ebf117886439c5c00
|
[] |
no_license
|
crowdbotics-apps/marjan-fashion-24858
|
e1fe2cba555d9fef7b17d74e31cd9dc7b72f8a80
|
aafa62f5fb59466ce4028e774d7e401dc712294f
|
refs/heads/master
| 2023-03-21T15:28:10.057904
| 2021-03-05T04:12:52
| 2021-03-05T04:12:52
| 344,690,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
"""
WSGI config for marjan_fashion_24858 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "marjan_fashion_24858.settings")
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
148a6f9052995914110fda1fdc097a42f961f4ed
|
bc5c8585b401ebcb90932571cae11b9603e2ef56
|
/tests/test_modules/test_pmac/test_rawmotorcspart.py
|
fd4179ec25a4cec13f729c1f71ef46e207b2dd2c
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jamesmudd/pymalcolm
|
8e22a9e20868d9f0d04dfb3a6b276aaa3e202590
|
66993756c802cadca70ea07b437c5c2395af3f67
|
refs/heads/master
| 2020-04-19T10:54:38.766494
| 2019-01-29T12:48:22
| 2019-01-29T12:48:22
| 168,153,411
| 0
| 0
| null | 2019-01-29T12:46:41
| 2019-01-29T12:46:41
| null |
UTF-8
|
Python
| false
| false
| 2,817
|
py
|
import unittest
from mock import patch
from malcolm.core import Process, AlarmSeverity
from malcolm.modules.builtin.controllers import StatefulController
from malcolm.modules.pmac.parts import RawMotorCSPart
class castr(str):
ok = True
severity = 0
class caenum(int):
ok = True
severity = 0
enums = ["ANYTHING", "BRICK1CS1", "BRICK1CS2"]
@patch("malcolm.modules.ca.util.catools")
class TestRawMotorCSPart(unittest.TestCase):
def setUp(self):
self.process = Process("proc")
self.o = RawMotorCSPart("cs", "PV:PRE")
c = StatefulController("mri")
c.add_part(self.o)
self.process.add_controller(c)
self.b = self.process.block_view("mri")
self.addCleanup(self.process.stop)
def do_init(self, catools):
catools.caget.side_effect = [[
caenum(2), castr("I"),
caenum(1), castr("A")
]]
self.process.start()
def test_init(self, catools):
self.do_init(catools)
catools.caget.assert_called_once_with(
["PV:PRE:CsPort", "PV:PRE:CsAxis", "PV:PRE:CsPort_RBV",
"PV:PRE:CsAxis_RBV"], format=catools.FORMAT_CTRL)
assert list(self.b) == [
'meta', 'health', 'state', 'disable', 'reset', 'cs']
assert self.b.cs.value == "BRICK1CS1,A"
def test_update_axis(self, catools):
self.do_init(catools)
update = castr("I")
self.o._update_value(update, 1)
assert self.b.cs.value == "BRICK1CS1,I"
def test_update_port(self, catools):
self.do_init(catools)
update = caenum(2)
self.o._update_value(update, 0)
assert self.b.cs.value == "BRICK1CS2,A"
def test_update_disconnect(self, catools):
self.do_init(catools)
update = caenum(0)
self.o._update_value(update, 0)
assert self.b.cs.value == ""
def test_update_bad(self, catools):
self.do_init(catools)
update = castr("")
update.ok = False
self.o._update_value(update, 1)
assert self.b.cs.value == ""
assert self.b.cs.alarm.severity == AlarmSeverity.INVALID_ALARM
def test_caput(self, catools):
self.do_init(catools)
catools.caget.side_effect = [[caenum(2), castr("Y")]]
self.o.caput("BRICK1CS2,X")
catools.caput.assert_called_once_with(
['PV:PRE:CsPort', 'PV:PRE:CsAxis'], (2, 'X'), wait=True
)
assert self.b.cs.value == "BRICK1CS2,Y"
def test_caput_none(self, catools):
self.do_init(catools)
catools.caget.side_effect = [[caenum(0), castr("")]]
self.o.caput("")
catools.caput.assert_called_once_with(
['PV:PRE:CsPort', 'PV:PRE:CsAxis'], (0, ''), wait=True
)
assert self.b.cs.value == ""
|
[
"tom.cobb@diamond.ac.uk"
] |
tom.cobb@diamond.ac.uk
|
b1abce58ea0b8c1778eb52d4460c4b39051b3a84
|
5e381364c2ab31ff3618369085afffba6caa8edb
|
/recipes/vulkan-memory-allocator/all/test_package/conanfile.py
|
7ef741773e9b018c9294979a6fd565987ff4c823
|
[
"MIT"
] |
permissive
|
CAMOBAP/conan-center-index
|
16aea68a6d22da22831ba985773125e8eda08f00
|
67d57532bdad549fef3fa6cb8fcdfa86bc55e4f1
|
refs/heads/master
| 2023-07-30T08:58:57.285571
| 2021-10-02T14:57:54
| 2021-10-02T14:57:54
| 323,262,699
| 1
| 0
|
MIT
| 2021-05-29T13:37:04
| 2020-12-21T07:30:02
|
Python
|
UTF-8
|
Python
| false
| false
| 453
|
py
|
import os
from conans import ConanFile, CMake, tools
class VulkanMemoryAllocatorTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self):
bin_path = os.path.join("bin", "example")
self.run(bin_path, run_environment=True)
|
[
"noreply@github.com"
] |
CAMOBAP.noreply@github.com
|
28a9ef3e533c226aefda479bb8b0e46a24cb8edb
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/point-mutations/4b3ba2b7e4d141ba80593132e1355e5f.py
|
353661c3c32ce019cb5d2a587d707fb05742eeeb
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
class DNA(object):
"""Class representing DNA strand."""
def __init__(self, strand):
self.strand = strand
def hamming_distance(self, strand):
"""Method calculating Hamming distance between object's DNA strand
and given one."""
n = min(len(self.strand), len(strand))
k = 0
for i in range(0, n):
if self.strand[i] != strand[i]:
k += 1
return k
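# Usage sketch with the classic point-mutations example; the two strands
# differ in 7 positions, so the expected output is 7:
if __name__ == "__main__":
    dna = DNA("GAGCCTACTAACGGGAT")
    print(dna.hamming_distance("CATCGTAATGACGGCCT"))  # 7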
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
7aeec444fe3ba90e5d5ed5656787c5901123d63e
|
f3179576d85369e9834df0810d57289c04c0b929
|
/account/migrations/0001_initial.py
|
a779a9f50a0b638e654c6fe796a6aac162e6f182
|
[] |
no_license
|
kolamor/template_django_site
|
cadd75931b460790a162ee6d5e3371140fd2098e
|
ee7972ca1a65e006a750bbe67f6605ac89248956
|
refs/heads/master
| 2022-12-11T00:43:17.809358
| 2020-09-16T20:21:27
| 2020-09-16T20:21:27
| 295,774,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
# Generated by Django 3.1.1 on 2020-09-14 20:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_of_birth', models.DateField(blank=True, null=True)),
('photo', models.ImageField(blank=True, upload_to='users/%Y/%m/%d/')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"kolamorev@mail.ru"
] |
kolamorev@mail.ru
|
85665dbe43d1c321ad12f2506b472e4a46306b5e
|
ac32e4a76b519a4f9d043ebb3835ad42b9ff6a3c
|
/tests/tokenize/test_tokenize_json.py
|
80ca3e3ae813a3479c5e0bcd38471a72225a6e8e
|
[
"BSD-3-Clause"
] |
permissive
|
rubbish822/typesystem
|
e63286f43672aa2d860c7dffe18431e3984f7439
|
fe201aecf871b3995b2d73ef647d34c1fedb36f1
|
refs/heads/master
| 2023-04-02T15:11:43.780011
| 2019-03-11T14:07:46
| 2019-03-11T14:07:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,167
|
py
|
from json.decoder import JSONDecodeError
import pytest
from typesystem.tokenize.tokenize_json import tokenize_json
from typesystem.tokenize.tokens import DictToken, ListToken, ScalarToken
def test_tokenize_object():
token = tokenize_json('{"a": [1, 2, 3], "b": "test"}')
expected = DictToken(
{
ScalarToken("a", 1, 3): ListToken(
[ScalarToken(1, 7, 7), ScalarToken(2, 10, 10), ScalarToken(3, 13, 13)],
6,
14,
),
ScalarToken("b", 17, 19): ScalarToken("test", 22, 27),
},
0,
28,
)
assert repr(token) == 'DictToken(\'{"a": [1, 2, 3], "b": "test"}\')'
assert token == expected
assert token.value == {"a": [1, 2, 3], "b": "test"}
assert token.lookup(["a"]).value == [1, 2, 3]
assert token.lookup(["a"]).string == "[1, 2, 3]"
assert token.lookup(["a"]).start.line_no == 1
assert token.lookup(["a"]).start.column_no == 7
assert token.lookup_key(["a"]).value == "a"
assert token.lookup_key(["a"]).string == '"a"'
assert token.lookup_key(["a"]).start.char_index == 1
assert token.lookup_key(["a"]).end.char_index == 3
def test_tokenize_list():
token = tokenize_json("[true, false, null]")
expected = ListToken(
[ScalarToken(True, 1, 4), ScalarToken(False, 7, 11), ScalarToken(None, 14, 17)],
0,
18,
)
assert token == expected
assert token.value == [True, False, None]
assert token.lookup([0]).value is True
assert token.lookup([0]).string == "true"
assert token.lookup([0]).start.char_index == 1
assert token.lookup([0]).end.char_index == 4
def test_tokenize_floats():
token = tokenize_json("[100.0, 1.0E+2, 1E+2]")
expected = ListToken(
[
ScalarToken(100.0, 1, 5),
ScalarToken(100.0, 8, 13),
ScalarToken(100.0, 16, 19),
],
0,
20,
)
assert token == expected
assert token.value == [100.0, 1.0e2, 1e2]
assert token.lookup([0]).value == 100.0
assert token.lookup([0]).string == "100.0"
assert token.lookup([0]).start.char_index == 1
assert token.lookup([0]).end.char_index == 5
def test_tokenize_whitespace():
token = tokenize_json("{ }")
expected = DictToken({}, 0, 2)
assert token == expected
assert token.value == {}
assert token.string == "{ }"
token = tokenize_json('{ "a" : 1 }')
expected = DictToken({ScalarToken("a", 2, 4): ScalarToken(1, 9, 9)}, 0, 11)
assert token == expected
assert token.value == {"a": 1}
assert token.lookup(["a"]).value == 1
assert token.lookup(["a"]).string == "1"
assert token.lookup(["a"]).start.char_index == 9
assert token.lookup(["a"]).end.char_index == 9
assert token.lookup_key(["a"]).value == "a"
assert token.lookup_key(["a"]).string == '"a"'
assert token.lookup_key(["a"]).start.char_index == 2
assert token.lookup_key(["a"]).end.char_index == 4
def test_tokenize_parse_errors():
with pytest.raises(JSONDecodeError) as exc_info:
tokenize_json("{")
exc = exc_info.value
assert exc.msg == "Expecting property name enclosed in double quotes"
assert exc.pos == 1
with pytest.raises(JSONDecodeError) as exc_info:
tokenize_json('{"a"')
exc = exc_info.value
assert exc.msg == "Expecting ':' delimiter"
assert exc.pos == 4
with pytest.raises(JSONDecodeError) as exc_info:
tokenize_json('{"a":')
exc = exc_info.value
assert exc.msg == "Expecting value"
assert exc.pos == 5
with pytest.raises(JSONDecodeError) as exc_info:
tokenize_json('{"a":1')
exc = exc_info.value
assert exc.msg == "Expecting ',' delimiter"
assert exc.pos == 6
with pytest.raises(JSONDecodeError) as exc_info:
tokenize_json('{"a":1,1')
exc = exc_info.value
assert exc.msg == "Expecting property name enclosed in double quotes"
assert exc.pos == 7
with pytest.raises(JSONDecodeError) as exc_info:
tokenize_json('{"a":1 "b"')
exc = exc_info.value
assert exc.msg == "Expecting ',' delimiter"
assert exc.pos == 7
|
[
"tom@tomchristie.com"
] |
tom@tomchristie.com
|
e8eed13eedde2a1f73d5dd4d40462d475f7aef3f
|
a367a015dbc36287ca933955ded1ee58b5a2a61a
|
/swagger_client/models/fluid_consumption_rate_through_nozzle.py
|
925475d390d001c905e9a980b4d6954c0d2df360
|
[] |
no_license
|
kerniee/inno_intership_1_test_task
|
70211e153450011c427df595a02e3574dfe7ed9f
|
fc0619ef54b00806a3b59f3c07c1c1684682d65b
|
refs/heads/master
| 2023-05-23T02:24:40.083723
| 2021-06-21T16:15:04
| 2021-06-21T16:15:04
| 365,855,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,525
|
py
|
# coding: utf-8
"""
Teleagronom
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class FluidConsumptionRateThroughNozzle(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'nozzle_color': 'AllOfFluidConsumptionRateThroughNozzleNozzleColor',
'nozzle_pressure': 'AllOfFluidConsumptionRateThroughNozzleNozzlePressure',
'fluid_consumption_rate': 'float'
}
attribute_map = {
'id': 'id',
'nozzle_color': 'nozzle_color',
'nozzle_pressure': 'nozzle_pressure',
'fluid_consumption_rate': 'fluid_consumption_rate'
}
def __init__(self, id=None, nozzle_color=None, nozzle_pressure=None, fluid_consumption_rate=None): # noqa: E501
"""FluidConsumptionRateThroughNozzle - a model defined in Swagger""" # noqa: E501
self._id = None
self._nozzle_color = None
self._nozzle_pressure = None
self._fluid_consumption_rate = None
self.discriminator = None
self.id = id
self.nozzle_color = nozzle_color
self.nozzle_pressure = nozzle_pressure
self.fluid_consumption_rate = fluid_consumption_rate
@property
def id(self):
"""Gets the id of this FluidConsumptionRateThroughNozzle. # noqa: E501
:return: The id of this FluidConsumptionRateThroughNozzle. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this FluidConsumptionRateThroughNozzle.
:param id: The id of this FluidConsumptionRateThroughNozzle. # noqa: E501
:type: int
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def nozzle_color(self):
"""Gets the nozzle_color of this FluidConsumptionRateThroughNozzle. # noqa: E501
:return: The nozzle_color of this FluidConsumptionRateThroughNozzle. # noqa: E501
:rtype: AllOfFluidConsumptionRateThroughNozzleNozzleColor
"""
return self._nozzle_color
@nozzle_color.setter
def nozzle_color(self, nozzle_color):
"""Sets the nozzle_color of this FluidConsumptionRateThroughNozzle.
:param nozzle_color: The nozzle_color of this FluidConsumptionRateThroughNozzle. # noqa: E501
:type: AllOfFluidConsumptionRateThroughNozzleNozzleColor
"""
if nozzle_color is None:
raise ValueError("Invalid value for `nozzle_color`, must not be `None`") # noqa: E501
self._nozzle_color = nozzle_color
@property
def nozzle_pressure(self):
"""Gets the nozzle_pressure of this FluidConsumptionRateThroughNozzle. # noqa: E501
:return: The nozzle_pressure of this FluidConsumptionRateThroughNozzle. # noqa: E501
:rtype: AllOfFluidConsumptionRateThroughNozzleNozzlePressure
"""
return self._nozzle_pressure
@nozzle_pressure.setter
def nozzle_pressure(self, nozzle_pressure):
"""Sets the nozzle_pressure of this FluidConsumptionRateThroughNozzle.
:param nozzle_pressure: The nozzle_pressure of this FluidConsumptionRateThroughNozzle. # noqa: E501
:type: AllOfFluidConsumptionRateThroughNozzleNozzlePressure
"""
if nozzle_pressure is None:
raise ValueError("Invalid value for `nozzle_pressure`, must not be `None`") # noqa: E501
self._nozzle_pressure = nozzle_pressure
@property
def fluid_consumption_rate(self):
"""Gets the fluid_consumption_rate of this FluidConsumptionRateThroughNozzle. # noqa: E501
:return: The fluid_consumption_rate of this FluidConsumptionRateThroughNozzle. # noqa: E501
:rtype: float
"""
return self._fluid_consumption_rate
@fluid_consumption_rate.setter
def fluid_consumption_rate(self, fluid_consumption_rate):
"""Sets the fluid_consumption_rate of this FluidConsumptionRateThroughNozzle.
:param fluid_consumption_rate: The fluid_consumption_rate of this FluidConsumptionRateThroughNozzle. # noqa: E501
:type: float
"""
if fluid_consumption_rate is None:
raise ValueError("Invalid value for `fluid_consumption_rate`, must not be `None`") # noqa: E501
self._fluid_consumption_rate = fluid_consumption_rate
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FluidConsumptionRateThroughNozzle, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FluidConsumptionRateThroughNozzle):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"karapys.mor@gmail.com"
] |
karapys.mor@gmail.com
|
7ab87edf7fb834a8eaaf7973e70732fa4778a974
|
9dd33333caebc0b6f516ededa9aeebe707476852
|
/qc/qc/middlewares.py
|
78bd401ea4445eed71a7954552fa2c3aba118b7f
|
[] |
no_license
|
zx490336534/spider-review
|
4ef0cebd9d92bce0a42f4aeef5b03ae9695c2364
|
c7a3cdf7abc74732c556bc4b8c9928a20e7c4c78
|
refs/heads/master
| 2020-04-09T02:20:26.533103
| 2018-12-03T15:04:15
| 2018-12-03T15:04:15
| 159,937,120
| 13
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
import random
import redis
import hashlib
from scrapy import signals
from scrapy.exceptions import IgnoreRequest
from qc.settings import USER_AGENT as ua_list
class QcSpiderMiddleware(object):
"""
    Randomly switch to a different User-Agent for every request
"""
def process_request(self, request, spider):
user_agent = random.choice(ua_list)
request.headers['User-Agent'] = user_agent
class QcRedisMiddleware(object):
"""
    Put every URL into a Redis set to prevent crawling duplicates
"""
def __init__(self):
self.sr = redis.StrictRedis(host='localhost', port=6379, db=1)
def process_request(self, request, spider):
if request.url.startswith('https://jobs.51job.com/'):
            # Hash the detail-page URL with MD5 (the input must be bytes)
url_md5 = hashlib.md5(request.url.encode()).hexdigest()
result = self.sr.sadd('qc_url', url_md5)
if not result:
raise IgnoreRequest
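# To activate these middlewares, the project's settings.py would need an entry
# like the sketch below (the module path and priority numbers are assumptions,
# not taken from this project's actual settings):
#
# DOWNLOADER_MIDDLEWARES = {
#     'qc.middlewares.QcSpiderMiddleware': 543,
#     'qc.middlewares.QcRedisMiddleware': 544,
# }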
|
[
"490336534@qq.com"
] |
490336534@qq.com
|
a1be92f9773850b2fb15a33da32c81a191883f7c
|
2c8c415c1b386eb7e168a7cb17d55a73ab08ac36
|
/UrlHandler/migrations/0001_initial.py
|
a28550bdde5c621735367268f25788211c6fe269
|
[
"MIT"
] |
permissive
|
TanimSk/Help-The-Helpless
|
888b72b3faca9d307f5a0e41c05424050bc2c423
|
bdf6ad2c1d873831cb80f09cb62e068cba43e203
|
refs/heads/main
| 2023-02-02T13:37:27.407966
| 2020-12-23T07:25:22
| 2020-12-23T07:25:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
# Generated by Django 3.1.4 on 2020-12-23 06:42
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UrlLinks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('facebook_url', models.URLField()),
('youtube_url', models.URLField()),
('gmail_url', models.URLField()),
],
),
]
|
[
"61817579+baseplate-admin@users.noreply.github.com"
] |
61817579+baseplate-admin@users.noreply.github.com
|
c85721734e624cc87ceb5ad6f0d8bd65975de78a
|
c1e87e9a7f0f2e81e3113821c21378f7b6436b6f
|
/Щелчок/24/24_52.py
|
69e3935c47b9be84d6bb1164ad23e1c9dffd7548
|
[] |
no_license
|
Pochemu/Activity
|
8e2a7ec4f6b7fd233c0ee48e893733b077aac7a4
|
1b21e674635ff95104e18e93241c30020032e26a
|
refs/heads/main
| 2023-07-09T04:04:06.337321
| 2021-07-06T21:38:26
| 2021-07-06T21:38:26
| 337,492,398
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
f = open('shkolkovo_2.txt')
cnt = 0
cnt_max = 0
for s in f:
    if s.count('A') < 25:
        for i in range(ord('A'), ord('Z')+1):
            if chr(i) in s:  # index/rindex raise ValueError for absent letters
                cnt = s.rindex(chr(i)) - s.index(chr(i))
                cnt_max = max(cnt, cnt_max)
print(cnt_max)
|
[
"48361330+Pochemu@users.noreply.github.com"
] |
48361330+Pochemu@users.noreply.github.com
|
7916f1c0dcf1066cc49205d234ec76a5db1cf997
|
4e30c855c253cc1d972d29e83edb9d5ef662d30a
|
/daybook/forms.py
|
78fc660f041185c9829b162003142e5d49f5e0cc
|
[
"MIT"
] |
permissive
|
rajeshr188/django-onex
|
8b531fc2f519d004d1da64f87b10ffacbd0f2719
|
0a190ca9bcf96cf44f7773686205f2c1f83f3769
|
refs/heads/master
| 2023-08-21T22:36:43.898564
| 2023-08-15T12:08:24
| 2023-08-15T12:08:24
| 163,012,755
| 2
| 0
|
NOASSERTION
| 2023-07-22T09:47:28
| 2018-12-24T17:46:35
|
Python
|
UTF-8
|
Python
| false
| false
| 168
|
py
|
from django import forms
from django.contrib.admin.widgets import AdminDateWidget
class daybookform(forms.Form):
date = forms.DateField(widget=AdminDateWidget())
|
[
"rajeshrathodh@gmail.com"
] |
rajeshrathodh@gmail.com
|
f20c8a872358b9fb3614dae96f4a639ff2d38ed1
|
9b20743ec6cd28d749a4323dcbadb1a0cffb281b
|
/13_Deep_Learning_with_Python/13/decay_drop_based.py
|
7a7df9ff76b941850db37b74bf66d0353ae9cb4f
|
[] |
no_license
|
jggrimesdc-zz/MachineLearningExercises
|
6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178
|
ee265f1c6029c91daff172b3e7c1a96177646bc5
|
refs/heads/master
| 2023-03-07T19:30:26.691659
| 2021-02-19T08:00:49
| 2021-02-19T08:00:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
# Drop-Based Learning Rate Decay
import math
from keras.callbacks import LearningRateScheduler
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
from pandas import read_csv
from sklearn.preprocessing import LabelEncoder
# learning rate schedule
def step_decay(epoch):
initial_lrate = 0.1
drop = 0.5
epochs_drop = 10.0
lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
return lrate
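# Worked values for this schedule (a sanity check, not from the original source):
#   epochs 0-8   -> 0.1 * 0.5**0 = 0.1
#   epochs 9-18  -> 0.1 * 0.5**1 = 0.05
#   epochs 19-28 -> 0.1 * 0.5**2 = 0.025  (the rate halves every 10 epochs)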
# load dataset
dataframe = read_csv("ionosphere.csv", header=None)
dataset = dataframe.values
# split into input (X) and output (Y) variables
X = dataset[:, 0:34].astype(float)
Y = dataset[:, 34]
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
Y = encoder.transform(Y)
# create model
model = Sequential()
model.add(Dense(34, input_dim=34, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Compile model
sgd = SGD(lr=0.0, momentum=0.9)
model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
# learning schedule callback
lrate = LearningRateScheduler(step_decay)
callbacks_list = [lrate]
# Fit the model
model.fit(X, Y, validation_split=0.33, epochs=50, batch_size=28, callbacks=callbacks_list, verbose=2)
|
[
"jgrimes@jgrimes.tech"
] |
jgrimes@jgrimes.tech
|
7b703bc2896e3cd0f8b426c39ecbb5420161c927
|
d17a8870ff8ac77b82d0d37e20c85b23aa29ca74
|
/lite/tests/unittest_py/op/backends/host/test_relu6_op.py
|
e47927da98e69c262b7712e29005b73d40fe45f7
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle-Lite
|
4ab49144073451d38da6f085a8c56822caecd5b2
|
e241420f813bd91f5164f0d9ee0bc44166c0a172
|
refs/heads/develop
| 2023-09-02T05:28:14.017104
| 2023-09-01T10:32:39
| 2023-09-01T10:32:39
| 104,208,128
| 2,545
| 1,041
|
Apache-2.0
| 2023-09-12T06:46:10
| 2017-09-20T11:41:42
|
C++
|
UTF-8
|
Python
| false
| false
| 1,680
|
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../../common')
sys.path.append('../../../')
import test_relu6_op_base
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
class TestRelu6Op(AutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
return True
def sample_program_configs(self, draw):
return test_relu6_op_base.sample_program_configs(draw)
def sample_predictor_configs(self):
config = CxxConfig()
config.set_valid_places(
{Place(TargetType.Host, PrecisionType.FP32, DataLayoutType.NCHW)})
yield config, ["relu6"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
c96ebeab3ec93603bfbe70fbec041ad533547e03
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/news/models_20201125123239.py
|
55e05516581b090c070f2f6ce249d636eccf8f32
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415
| 2021-01-05T14:55:45
| 2021-01-05T14:55:45
| 303,961,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,782
|
py
|
from django.db import models
from modelcluster.models import ParentalKey
from wagtail.contrib.forms.models import AbstractEmailForm, AbstractFormField
from wagtail.admin.edit_handlers import FieldPanel, InlinePanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.core.fields import RichTextField
# Create your models here.
class CustomAbstractFormField(AbstractFormField):
    field_type = models.CharField(
        verbose_name='Field Type',
        # the original .history snapshot was cut off mid-argument here;
        # max_length=255 is an assumed value so the snippet at least parses
        max_length=255,
    )
class FormField(AbstractFormField):
page = ParentalKey(
'NewsPage',
on_delete=models.CASCADE,
related_name='form_fields',
)
class NewsPage(AbstractEmailForm):
    template = 'news/news_page.html'
    landing_page_template = 'news/news_page_leading.html'
    subpage_types = []
    max_count = 1
intro = RichTextField(blank=True, features=['bold', 'italic', 'ol', 'ul'])
thank_you_text = RichTextField(
blank=True,
features=['bold', 'italic', 'ol', 'ul'])
map_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=False,
on_delete=models.SET_NULL,
        help_text='The image will be cropped to a size of 588px by 355px',
related_name='+',
)
map_url = models.URLField(
blank=True,
        help_text='Optional. If you provide a link here, the image will become a link.'
)
content_panels = AbstractEmailForm.content_panels + [
FieldPanel('intro'),
ImageChooserPanel('map_image'),
FieldPanel('map_url'),
InlinePanel('form_fields', label="Form Fields"),
FieldPanel('thank_you_text'),
FieldPanel('from_address'),
FieldPanel('to_address'),
FieldPanel('subject'),
]
|
[
"rucinska.patrycja@gmail.com"
] |
rucinska.patrycja@gmail.com
|
99b8d312c2b0f1dd950dc13681e497b85cb2db8b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/305/78885/submittedfiles/testes.py
|
c9fd898511eec6ab3f5fd5d3e4799a1e4e204ad4
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
# -*- coding: utf-8 -*-
import math
# START FROM HERE!
h = float(input('enter the value of: '))
P = ((72.7*h) - 58)
print('%.2f' %P)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
82037c643ef27e2ed8ebad5f7e521367051eb0fd
|
cc382cc8fc521c3427f582adedc1ff7b93a754de
|
/Python/Quiz/P_sum.py
|
2797c7c578f859d95f4094f7d758468e6d29fb69
|
[] |
no_license
|
jbro321/Python
|
c33e899145ea47db41038fcc117789ce821c4131
|
36854890aefd0384567b05600cbb849094cd9f91
|
refs/heads/main
| 2023-06-05T19:53:40.144922
| 2021-06-24T13:29:24
| 2021-06-24T13:29:24
| 331,165,120
| 2
| 3
| null | 2021-04-05T03:46:58
| 2021-01-20T02:06:00
|
Python
|
UTF-8
|
Python
| false
| false
| 755
|
py
|
# Problem
# *** Read four variables a, b, c, d as the initial input
# *** When writing the output, do not print the sum by using a+b directly.
# *** Print the output below exactly as shown; the contents in [] need not be printed.
# *** Print the line: number1 + number2 = sum
# Output
# --------------------
# a[input value] + b[input value] = sum
# c[input value] + d[input value] = sum
#1
from datetime import datetime
now = datetime.now()
print(now)
import sys
a = list(map(int, sys.stdin.readline().split()))
# a, b, c, d = map(int, input().split())
print("{} + {} = {}".format(a[0], a[1], sum(a[:2])))
print("{} + {} = {}".format(a[2], a[3], sum(a[2:])))
now = datetime.now()
print(now)
|
[
"jaehyung0321@gmail.com"
] |
jaehyung0321@gmail.com
|
0f4e7e981575a48924e04e71394b1f46bc2b670b
|
2e643989fad07bb54b75e178998bb1546540317a
|
/securetea/lib/auto_server_patcher/patcher.py
|
9181df1b1a141221e9970c3cb444903c1c5dc750
|
[
"MIT"
] |
permissive
|
fijimunkii/SecureTea-Project
|
4fa89c3a28120cc4dbdd4fc6adac204a3c1ae99a
|
e3752e358d9837ed4677984e1b29a2dd3818dfe6
|
refs/heads/master
| 2020-06-23T00:19:49.464335
| 2019-07-23T09:12:51
| 2019-07-23T09:12:51
| 198,443,213
| 1
| 0
|
MIT
| 2019-07-23T14:05:36
| 2019-07-23T14:05:35
| null |
UTF-8
|
Python
| false
| false
| 7,457
|
py
|
# -*- coding: utf-8 -*-
u"""Patcher for SecureTea Auto Server Patcher
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <abhishek_official@hotmail.com> , Jun 20 2019
Version: 1.4
Module: SecureTea
"""
import json
import sys
from securetea.lib.auto_server_patcher.patch_logger import PatchLogger
from securetea.lib.auto_server_patcher import utils
class ConfigPatcher(object):
"""ConfigPatcher class."""
def __init__(self, debug=False, to_patch=None):
"""
Initialize ConfigPatcher.
Args:
            debug (bool): Log on terminal or not
            to_patch (list): Names of the configuration files to patch
Raises:
None
Returns:
None
"""
# Initialize logger
self.logger = PatchLogger(
__name__,
debug=debug
)
# Configuration file path
self._CONFIG_PATH = "securetea/lib/auto_server_patcher/configs/config.json"
# Load configuration
self.config_data = self.open_json(self._CONFIG_PATH)
# Categorize OS
os_name = utils.categorize_os()
if os_name:
try:
self.os_config_data = self.config_data[os_name] # if OS in configuration
except KeyError:
self.logger.log(
"Could not load OS specific configuration.",
logtype="error"
)
else:
self.logger.log(
"Operating system cannot be determined.",
logtype="error"
)
sys.exit(0)
# List of files to patch
if to_patch:
self.to_patch = to_patch
else:
self.to_patch = []
def open_file(self, path):
"""
Open the file and return the data as list.
Args:
path (str): Path of the file
Raises:
None
Returns:
            list: Lines of the file, or None on error
"""
try:
with open(path, "r") as f:
return f.readlines()
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def open_json(self, path):
"""
Open the JSON file and return the data as dict.
Args:
path (str): Path of the file
Raises:
None
Returns:
            dict: Parsed JSON data, or None on error
"""
try:
with open(path, "r") as json_data_file:
data = json.load(json_data_file)
return data
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def write_data(self, path, data):
"""
Write the data into the file.
Args:
path (str): Path of the file
data (list): List of data to write
Raises:
None
Returns:
None
"""
try:
with open(path, "w") as wf:
for line in data:
wf.write(line + "\n")
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def patch(self):
"""
Patch the configuration file based
on the configuration data stored.
Args:
None
Raises:
None
Returns:
None
"""
for path in self.os_config_data: # iterate over the configuration
patch_this = False # patch this file or not
for req_patch in self.to_patch:
if req_patch in path:
patch_this = True
if patch_this:
self.logger.log(
"Patching: " + str(path),
logtype="info"
)
file_data = self.open_file(path) # open the file to configure
new_data = [] # new data to over-write
config_added = [] # successfully configured parameters
config_not_added = [] # not configured parameters
sep = self.os_config_data[path]["sep"] # separator
for index, line in enumerate(file_data):
flag = 0 # write the original line
for rep_text in self.os_config_data[path]["config"].keys():
                        hold = False  # forward referencing not needed
                        in_front = False  # not found by forward reference
                        if rep_text in line:
                            if line.strip(" ").strip("\n").startswith("#"):  # found a comment
                                hold = True  # hold, prepare for a forward reference
                            if hold:  # if a forward reference is needed
                                for _, nf_line in enumerate(file_data, start=index+1):
                                    if (rep_text in nf_line and
                                            not nf_line.strip(" ").strip("\n").startswith("#")):
                                        in_front = True  # found by forward referencing
                            if not in_front:  # not found by forward referencing
self.logger.log(
"Old config line: " + line.strip("\n"),
logtype="info"
)
new_config_line = rep_text + sep + \
self.os_config_data[path]["config"][rep_text]
new_data.append(new_config_line)
config_added.append(rep_text)
flag = 1 # write the new line
self.logger.log(
"New config line: " + new_config_line,
logtype="info"
)
if flag == 0: # write the original line
new_data.append(line.strip(" ").strip("\n"))
elif flag == 1: # already written
flag = 0 # reset flag
# Look which parameters were not over-written
# as they were not found in the config file
for rep_text in self.os_config_data[path]["config"].keys():
if rep_text not in config_added:
new_config_line = rep_text + sep + \
self.os_config_data[path]["config"][rep_text]
config_not_added.append(new_config_line)
# Extend the new configuration
new_data.extend(config_not_added)
# Write the data (overwrite) the config file
self.write_data(path=path, data=new_data)
self.logger.log(
"Patched: " + str(path),
logtype="info"
)
# Empty the list for the next configuration file
new_data.clear()
config_added.clear()
config_not_added.clear()
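# A sketch of the config.json layout implied by patch() above; the OS key,
# file path, separator, and parameter names are illustrative assumptions:
#
# {
#     "debian": {
#         "/etc/ssh/sshd_config": {
#             "sep": " ",
#             "config": {
#                 "PermitRootLogin": "no",
#                 "MaxAuthTries": "3"
#             }
#         }
#     }
# }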
|
[
"abhishek_official@hotmail.com"
] |
abhishek_official@hotmail.com
|
d6a216950d956f0368d07afdb48bf084b58ff7e5
|
8e3a3c845ca3320483b233e8a0db4081aa3b8664
|
/clases/migrations/0007_contadorpreguntas.py
|
4a29e06d54f68db53637390cbb6553b607a32b13
|
[] |
no_license
|
sofide/loiprocesos
|
7d56398395e6f3302f4d9ec3627ed1b4c24bc17a
|
4047fa02d0cfbcf744c80d59e3402215f8b294d3
|
refs/heads/master
| 2021-07-08T03:26:55.171459
| 2020-08-04T03:23:10
| 2020-08-04T03:23:10
| 61,167,908
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-24 02:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('clases', '0006_auto_20160623_0041'),
]
operations = [
migrations.CreateModel(
name='ContadorPreguntas',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cantidad', models.IntegerField()),
('primero', models.BooleanField(default=False)),
('ultimo', models.BooleanField(default=False)),
('clase', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='clases.Clase')),
('exposicion', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='clases.Exposicion')),
('preguntador', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='clases.Grupo')),
],
),
]
|
[
"sofi.denner@gmail.com"
] |
sofi.denner@gmail.com
|
ede826fd675c4a60c307a3e7000c07f5f2902746
|
12f83344cdfe561db39ad9106dbf263ccd919f7e
|
/Projects/miami_metro/platformdatafetcher/producturlsextractor.py
|
544ed8554122d5443bb722cf9841e792d34c5f67
|
[] |
no_license
|
TopWebGhost/Angular-Influencer
|
ebcd28f83a77a92d240c41f11d82927b98bcea9e
|
2f15c4ddd8bbb112c407d222ae48746b626c674f
|
refs/heads/master
| 2021-01-19T10:45:47.039673
| 2016-12-05T01:59:26
| 2016-12-05T01:59:26
| 82,214,998
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
import logging
import time
import baker
from celery.decorators import task
import requests
import lxml.html
from django.conf import settings
from selenium.webdriver.support.ui import WebDriverWait
from xpathscraper import utils
from xpathscraper import xbrowser
log = logging.getLogger('platformdatafetcher.producturlsextractor')
class ProductUrlsExtractor(object):
supported_domains = []
def extract_product_urls(self, url):
raise NotImplementedError()
class LiketkExtractor(ProductUrlsExtractor):
supported_domains = ['liketoknow.it', 'liketk.it']
def extract_product_urls(self, url):
try:
with xbrowser.XBrowser(headless_display=settings.AUTOCREATE_HEADLESS_DISPLAY) as xb:
xb.load_url(url)
anchors = WebDriverWait(xb.driver, 10).until(
lambda _: xb.els_by_xpath('//div[@class="hoverflow"]//a')
)
anchors = [a for a in anchors if a.get_attribute('href') and \
utils.domain_from_url(a.get_attribute('href')) == 'rstyle.me']
urls = utils.unique_sameorder(a.get_attribute('href') for a in anchors)
return urls
except Exception as e:
log.exception(e, extra={'url': url})
return None
CLASSES = [
LiketkExtractor,
]
ALL_SUPPORTED_DOMAINS = {dom for cls in CLASSES for dom in cls.supported_domains}
@baker.command
def do_extract_product_urls(url):
domain = utils.domain_from_url(url)
matching_classes = [cls for cls in CLASSES if domain in cls.supported_domains]
res = []
for cls in matching_classes:
e = cls()
e_res = e.extract_product_urls(url)
log.info('%r extracted product urls: %r', e, e_res)
res += e_res
res = utils.unique_sameorder(res)
log.info('All product urls extracted from %r: %r', url, res)
return res
def get_blog_url_from_liketoknowit(liketoknowit_url=None, xb=None):
"""
Function to extract user's blog url from her http://liketoknow.it/<username> page.
:param liketoknowit_url: url to liketoknowit page
:return: blog url
"""
def get_the_blog_url(xb, liketoknowit_url):
xb.load_url(liketoknowit_url)
anchors = WebDriverWait(xb.driver, 10).until(
lambda _: xb.els_by_xpath('//publisher-header//h5//a')
)
anchors = [a for a in anchors if a.get_attribute('href')]
urls = utils.unique_sameorder(a.get_attribute('href') for a in anchors)
return urls[0] if len(urls) > 0 else None
if liketoknowit_url is None:
return None
try:
if xb is None:
with xbrowser.XBrowser(headless_display=settings.AUTOCREATE_HEADLESS_DISPLAY) as xb:
return get_the_blog_url(xb, liketoknowit_url)
else:
return get_the_blog_url(xb, liketoknowit_url)
except Exception as e:
log.exception(e, extra={'url': liketoknowit_url})
return None
if __name__ == '__main__':
utils.log_to_stderr()
baker.run()
|
[
"ivanfridrich1981129@yandex.com"
] |
ivanfridrich1981129@yandex.com
|
60e328a06c3e1fdb001548de48de55c0eb23e0fb
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_95/2722.py
|
d7f75c386beda85c91ab2587d1ef7524e8e9592a
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
#Author: Alexander Peel
a = open("small-a.gen.in", "r")
b = open("small-a.gen.out", "r")
charMap = {'z': 'q', 'q': 'z'}
revCharMap = {'q': 'z'}
j=0
for x in range(3):
line = a.readline()
gLine = b.readline()
for i in range(min(len(gLine),len(line))):
if line[i] not in charMap.keys():
charMap[line[i]] = gLine[i]
revCharMap[gLine[i]] = line[i]
for x in sorted(revCharMap.keys()):
print(x, revCharMap[x])
a.close()
b.close()
print("size:", len(charMap))
inp = open("A-small-attempt1.in", 'r')
out = open("small-a.out", 'w')
#for x in charMap.keys():
#print(x, charMap[x])
T = int (inp.readline())
i = 1
for x in range(T):
out.write("Case #" + str(i) + ": ")
line = inp.readline()
for c in line:
out.write(charMap[c])
i+=1
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
6a33b0bb6c094e98d634895bdc96a009efda9df4
|
5e8d86f6ddfd516b9768e8617ced0baca8112f4c
|
/core-python/Core_Python/regexpkg/Regex_As_3.py
|
d04581509fb9cb7e606dd55ed7907e899e3dc795
|
[
"MIT"
] |
permissive
|
bharat-kadchha/tutorials
|
0a96ce5a3da1a0ceb39a0d464c8f3e2ff397da7c
|
cd77b0373c270eab923a6db5b9f34c52543b8664
|
refs/heads/master
| 2022-12-23T11:49:34.042820
| 2020-10-06T03:51:20
| 2020-10-06T03:51:20
| 272,891,375
| 1
| 0
|
MIT
| 2020-06-17T06:04:33
| 2020-06-17T06:04:33
| null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
''' The contains_acronym function checks the text for the presence of 2 or more
characters or digits surrounded by parentheses, with at least the first character
in uppercase (if it's a letter), returning True if the condition is met, or False
otherwise. For example, "Instant messaging (IM) is a set of communication
technologies used for text-based communication" should return True since (IM)
satisfies the match conditions." Fill in the regular expression
in this function: '''
import re
def contains_acronym(text):
pattern = r"[(\^[A-Z0-9][a-zA-Z]{2,}[)\]]"
result = re.search(pattern, text)
return result != None
print(contains_acronym("Instant messaging (IM) is a set of communication technologies used for text-based communication")) # True
print(contains_acronym("American Standard Code for Information Interchange (ASCII) is a character encoding standard for electronic communication")) # True
print(contains_acronym("Please do NOT enter without permission!")) # False
print(contains_acronym("PostScript is a fourth-generation programming language (4GL)")) # True
print(contains_acronym("Have fun using a self-contained underwater breathing apparatus (Scuba)!")) # True
|
[
"deeppatel.dd@gmail.com"
] |
deeppatel.dd@gmail.com
|
97f89bd27f13c0f02aecc8b7b6338f2d4ceb35d4
|
aa9a0acc85a7328969a81527f3ed7c155a245727
|
/chapter_7/mountain_poll.py
|
1cbe3e263c36adbf0e5ad45eaf8e28e5f361de72
|
[] |
no_license
|
mwnickerson/python-crash-course
|
7035e21e1ee60c05d1d475ebcf04bd6a93c5967a
|
18784c7e3abfb74f85f8c96cb0f8e606cab6dccc
|
refs/heads/main
| 2023-08-03T20:14:49.883626
| 2021-09-25T05:31:12
| 2021-09-25T05:31:12
| 400,644,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
# mountain poll
# store each person's response in a dictionary
responses = {}
# set a flag to indicate that polling is active
polling_active = True
while polling_active:
# prompt for the person's name and response
name = input("\nWhat is your name? ")
response = input("Which mountain would you like to climb someday? ")
# store the response in the dictionary
responses[name] = response
# find out if anyone else is going to take the poll
repeat = input("Would you like to let another person respond? (yes/ no) " )
if repeat == 'no':
polling_active = False
# polling is complete. show the results
print("\n--- Poll Results ---")
for name, response in responses.items():
print(f"{name} would like to climb {response}")
|
[
"82531659+mwnickerson@users.noreply.github.com"
] |
82531659+mwnickerson@users.noreply.github.com
|
f61d55e3301a8e2accce79dfa29797592506b149
|
01ebe45440f08158c796466cca679dec9687d93c
|
/tests/cx_devices/devices_test.py
|
c069a3a600767508066fdd74089c959fea2e6c90
|
[
"MIT"
] |
permissive
|
ilarrain/controllerx
|
694211a247055d2a7417ba3648cdb099468c857b
|
0a1ef68b3fd2b6dbb2a499b6b9920e6b3bfa2185
|
refs/heads/master
| 2023-02-01T00:50:23.731673
| 2020-09-19T22:00:14
| 2020-09-19T22:00:14
| 297,980,184
| 0
| 0
| null | 2020-09-23T13:33:42
| 2020-09-23T13:33:41
| null |
UTF-8
|
Python
| false
| false
| 1,911
|
py
|
import cx_devices as devices_module
from cx_core import Controller
from cx_core.controller import ReleaseHoldController
from tests.test_utils import get_instances
def check_mapping(mapping, all_possible_actions, device):
device_name = device.__class__.__name__
if mapping is None:
return
if issubclass(device.__class__, ReleaseHoldController):
delay = device.default_delay()
if delay < 0:
raise ValueError(
f"`default_delay` should be a positive integer and the value is `{delay}`. "
f"Device class: {device_name}"
)
for k, v in mapping.items():
if not isinstance(v, str):
raise ValueError(
"The value from the mapping should be a string, matching "
"one of the actions from the controller. "
f"The possible actions are: {all_possible_actions}. "
f"Device class: {device_name}"
)
if v not in all_possible_actions:
raise ValueError(
f"{device_name}: `{v}` not found in the list of possible action from the controller. "
+ f"The possible actions are: {all_possible_actions}"
)
def test_devices(hass_mock):
devices = get_instances(
devices_module.__file__, devices_module.__package__, Controller
)
for device in devices:
type_actions_mapping = device.get_type_actions_mapping()
if type_actions_mapping is None:
continue
possible_actions = list(type_actions_mapping.keys())
integration_mappings_funcs = [
device.get_z2m_actions_mapping,
device.get_deconz_actions_mapping,
device.get_zha_actions_mapping,
]
for func in integration_mappings_funcs:
mappings = func()
check_mapping(mappings, possible_actions, device)
|
[
"xaviml.93@gmail.com"
] |
xaviml.93@gmail.com
|
a357e72273bb8edb1f639d91c440f562df43333f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_stagehands.py
|
1e2b38dd0830cac5b9bb7439a49fbae6ba2fda40
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# class header
class _STAGEHANDS():
def __init__(self,):
self.name = "STAGEHANDS"
		self.definitions = ['stagehand']  # was a bare name `stagehand` (a NameError); a one-item list is assumed
self.parents = []
		self.children = []  # fixed typo: was `childen`
self.properties = []
self.jsondata = {}
self.basic = ['stagehand']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f6f1ad958285f676ec5c7f6800454799dbcb29b6
|
c77148a25435b50a35fceab36112fba18dbb0866
|
/backup/Jun13/units/ZemoraVoidbringer.py
|
2e0ac05310a366bbc1dee0479db209f1851a9f09
|
[] |
no_license
|
SozBroz/PrismataBot
|
51fbecf90950d13eb52606a5b18984b5474746ba
|
f375ca8dc396abbca4134f70cb262fc78b90a17e
|
refs/heads/master
| 2020-05-31T13:37:50.102010
| 2019-06-06T02:48:01
| 2019-06-06T02:48:01
| 136,826,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
#!/usr/bin/python3.6
class ZemoraVoidbringer:
def __init__(self,owner):
self.owner=owner
self.lifespan=-1
self.frontline=False
self.cooldown=6
self.defaultBlocking=False
self.assignedBlocking=False
self.health=20
self.fragile=True
self.attack=8
self.startTurnDict={
}
self.onClickDict={
"gold":8,
"attack":8
}
self.onClickCost={
"green":8
}
def __str__(self):
return "Zemora Voidbringer"
def startTurn(self):
return True
    def canClick(self):
        # fixed: wrong attribute case, missing colon, and undefined names
        for i in self.onClickCost:
            if self.owner.resDict[i] < self.onClickCost[i]:  # assumes paying exactly the cost is allowed
                return False
        return True
    def onClick(self):
        for i in self.onClickCost:
            self.owner.resDict[i] -= self.onClickCost[i]
def ZemoraVoidbringerCost():
buyCostDict={
"gold":5,
"green":3
}
return buyCostDict,True,1,[],"Zemora Voidbringer"
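# Hedged smoke test (stub owner invented here; resDict keys mirror onClickCost):
# class StubOwner:
#     resDict = {"green": 8}
# z = ZemoraVoidbringer(StubOwner())
# z.canClick()  # True: 8 green covers the 8-green cost exactly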
|
[
"Phil@oc1140302110.ibm.com"
] |
Phil@oc1140302110.ibm.com
|
ac6069daea455c23cbfa5a901efeccb900fd13c5
|
5785d7ed431b024dd910b642f10a6781df50e4aa
|
/revise-daily/google/educative/dp/10_subset_sum_partition.py
|
8731a79078aa769c5036698c2d03b4fcc1f54262
|
[] |
no_license
|
kashyapa/interview-prep
|
45d77324446da34d99bf8efedb3544b367b5523e
|
7060c090c40602fb9c4778eace2078e1b51e235b
|
refs/heads/master
| 2023-07-28T13:12:49.515299
| 2021-09-06T14:33:25
| 2021-09-06T14:33:25
| 403,706,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
# Given a set of positive numbers, find if we can partition it into two subsets such that the sum of elements in both the subsets is equal.
def equal_subset_sum_partition(nums):
    s = sum(nums)
    if s % 2 != 0:  # an odd total can never split into two equal halves
        return False
    def rec(idx, remaining_sum):
        if remaining_sum == 0:
            return True
        if idx == len(nums):
            return False
        if nums[idx] <= remaining_sum:  # '<' skipped exact fits; '<=' is needed
            if rec(idx + 1, remaining_sum - nums[idx]):
                return True
        return rec(idx + 1, remaining_sum)
    return rec(0, s // 2)  # search for one subset summing to half the total
def equal_subset_sum_dp(nums):
    s = sum(nums)
    if s % 2 != 0:
        return False
    s //= 2
    dp = [[False for _ in range(s + 1)] for _ in range(len(nums))]
    for i in range(len(nums)):
        dp[i][0] = True  # the empty subset always sums to 0
    for j in range(1, s + 1):
        dp[0][j] = (nums[0] == j)
    for i in range(1, len(nums)):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # leave nums[i] out
            if not dp[i][j] and nums[i] <= j:
                dp[i][j] = dp[i - 1][j - nums[i]]  # or take nums[i]
    return dp[len(nums) - 1][s]
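# A quick sanity check (hedged: example values chosen here, not from the original):
if __name__ == "__main__":
    print(equal_subset_sum_partition([1, 2, 3, 4]))  # True: {1, 4} and {2, 3}
    print(equal_subset_sum_dp([1, 2, 3, 4]))         # True
    print(equal_subset_sum_dp([1, 2, 3, 9]))         # False: total 15 is odd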
|
[
"schandra2@godaddy.com"
] |
schandra2@godaddy.com
|
497f21accdf90e5db6cfc9b223630ab19336cddb
|
85fc4fcd841226c30b1a5824468eae95e6da3cd1
|
/oddities.py
|
3aadbd44e0062fdb114cb855a0e00371b739b293
|
[] |
no_license
|
a5vh/kattis
|
1676060acfc6eef1d7c558299063646f3b7fcbf3
|
093cbeba31149fa0182ecc1bc8a43c60cdb1fa36
|
refs/heads/master
| 2020-08-17T19:54:11.754205
| 2019-11-26T01:34:29
| 2019-11-26T01:34:29
| 215,705,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
times = int(input())
for i in range(times):
    num = int(input())
    if num % 2 == 0:
        print(num, "is even")  # was " is even", which printed a double space
    else:
        print(num, "is odd")
|
[
"august.hummert5@gmail.com"
] |
august.hummert5@gmail.com
|
23452b8aa23ec30902431afb8e033d9650a11a27
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/4050/186004050.py
|
0ef5e94fd43a44abe1431659b10050e7af9dfadf
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 4,983
|
py
|
from bots.botsconfig import *
from records004050 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'UW',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'CUR', MIN: 0, MAX: 1},
{ID: 'LTR', MIN: 0, MAX: 99},
{ID: 'NM1', MIN: 0, MAX: 2, LEVEL: [
{ID: 'N3', MIN: 0, MAX: 3},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 9},
{ID: 'PER', MIN: 0, MAX: 3},
]},
{ID: 'ACT', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'LX', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'NM1', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'DMG', MIN: 0, MAX: 1},
{ID: 'DTP', MIN: 0, MAX: 5},
{ID: 'AM1', MIN: 0, MAX: 9},
{ID: 'PWK', MIN: 0, MAX: 1},
{ID: 'MSG', MIN: 0, MAX: 99999},
{ID: 'DMA', MIN: 0, MAX: 1},
{ID: 'QTY', MIN: 0, MAX: 99999},
]},
{ID: 'BOR', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'DTP', MIN: 0, MAX: 99999},
{ID: 'MSG', MIN: 0, MAX: 99999},
{ID: 'NM1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'PER', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'DMA', MIN: 0, MAX: 1},
{ID: 'REL', MIN: 0, MAX: 1},
]},
{ID: 'SPK', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'CD2', MIN: 0, MAX: 9},
{ID: 'DTP', MIN: 0, MAX: 9},
{ID: 'REF', MIN: 0, MAX: 1},
{ID: 'MSG', MIN: 0, MAX: 99999},
{ID: 'NM1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N4', MIN: 0, MAX: 1},
]},
]},
{ID: 'LTR', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'CD2', MIN: 0, MAX: 9},
{ID: 'DTP', MIN: 0, MAX: 9},
{ID: 'NM1', MIN: 0, MAX: 9},
{ID: 'MSG', MIN: 0, MAX: 99999},
]},
{ID: 'UC', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'HL', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'UQS', MIN: 0, MAX: 1},
{ID: 'NM1', MIN: 0, MAX: 1},
{ID: 'N1', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'DTP', MIN: 0, MAX: 99999},
{ID: 'QTY', MIN: 0, MAX: 99999},
{ID: 'MSG', MIN: 0, MAX: 99999},
{ID: 'DMA', MIN: 0, MAX: 1},
{ID: 'AM1', MIN: 0, MAX: 1},
{ID: 'DMG', MIN: 0, MAX: 1},
{ID: 'AMT', MIN: 0, MAX: 1},
{ID: 'EC', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 1},
{ID: 'IN1', MIN: 0, MAX: 1},
{ID: 'EMS', MIN: 0, MAX: 1},
{ID: 'ASL', MIN: 0, MAX: 99999},
{ID: 'TOA', MIN: 0, MAX: 1},
{ID: 'TOV', MIN: 0, MAX: 1},
{ID: 'III', MIN: 0, MAX: 99999},
{ID: 'SIN', MIN: 0, MAX: 1},
{ID: 'UCS', MIN: 0, MAX: 1},
{ID: 'FH', MIN: 0, MAX: 1},
{ID: 'UD', MIN: 0, MAX: 1},
{ID: 'CDS', MIN: 0, MAX: 1},
{ID: 'CED', MIN: 0, MAX: 1},
{ID: 'YNQ', MIN: 0, MAX: 1},
{ID: 'MPI', MIN: 0, MAX: 1},
{ID: 'EFI', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'BIN', MIN: 1, MAX: 1},
]},
]},
]},
{ID: 'LS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'UD', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'NM1', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REL', MIN: 0, MAX: 1},
{ID: 'DTP', MIN: 0, MAX: 1},
{ID: 'EFI', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'BIN', MIN: 1, MAX: 1},
]},
]},
{ID: 'LE', MIN: 1, MAX: 1},
]},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
5f656012f1f9e8a1142bd5fbe97e5773232143b6
|
f35259cdae65259d41d5c86f09351d6888a81de6
|
/module6.py
|
87ce2236305191b49e11e2aa5db13fd19e0d7fba
|
[
"MIT"
] |
permissive
|
RajeshKumar-1998/python-web
|
9dbc71da656baec7946b9dae9f36919ac60be0ca
|
a4a7823752ce91a7a5bd3aaa1210c9ddad7ea55e
|
refs/heads/master
| 2022-04-18T04:57:12.274235
| 2020-04-23T12:28:14
| 2020-04-23T12:28:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Rajesh
#
# Created: 26-12-2019
# Copyright: (c) Rajesh 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
import os
import threading
from threading import Thread
import time
def usr():
print("Wait for few secs")
time.sleep(5)
mypath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(mypath)
return
def main():
inp = input("Enter the query")
print(inp)
print("Your Query is being processing !!!")
usr()
if __name__ == '__main__':
main()
|
[
"57509232+Rajeshkumar-dev-cloud@users.noreply.github.com"
] |
57509232+Rajeshkumar-dev-cloud@users.noreply.github.com
|
be90ebc66d52f4050ebf05b9127d53fc6025b638
|
b09e71b77dd41d5db266733d1eedb845cb56d5c2
|
/models/ts_hred/src/hred/read_data.py
|
e4fab39a8fee951ef16fc947503a9388aea060df
|
[] |
no_license
|
lingxiao/neural-chatbot
|
1bcaaea5ede06d0cdc7232f3905b2c813f97f36d
|
70dc9366c9d419c26cfb51086187026503820267
|
refs/heads/master
| 2021-01-20T07:51:26.398642
| 2017-12-26T07:05:11
| 2017-12-26T07:05:11
| 90,052,227
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,147
|
py
|
import tensorflow as tf
import os
import numpy as np
import subprocess
import pickle
import logging as logger
def read_batch( data_file
, batch_size = 80
, eoq_symbol = 1
, pad_symbol = 2
, max_seq_len = 50 ):
batch = ([], [])
from sys import platform
if platform == "darwin":
command = 'gshuf'
else:
command = 'shuf'
# subprocess.call('%s %s -o %s' % (command, data_file, data_file), shell=True) # shuffling the file for the batches
for i, (x, y) in enumerate(read_line(data_file, eoq_symbol)):
if i != 0 and i % batch_size == 0:
padded_batch, max_len = add_padding(batch, eoq_symbol, pad_symbol, max_seq_len)
yield padded_batch, max_len
batch = ([], [])
batch[0].append(x)
batch[1].append(y)
def read_line(data_file, eoq_symbol=1, eos_symbol=2, sos_symbol=3):
with open(data_file, 'r') as df:
for line in df:
# first replace tab with eoq symbol, never predict eos_symbol
# x = [int(i) for i in line.strip().replace('\t', ' %d ' % eoq_symbol).split()]
# y = x[1:] + [eoq_symbol]
x = [sos_symbol] + [int(i) for i in line.strip().replace('\t', ' %d ' % eoq_symbol).split()] + [eoq_symbol]
y = x[1:] + [eos_symbol]
yield x, y
def add_padding(batch, eoq_symbol=1, pad_symbol=2, max_seq_len=50):
max_len_x = len(max(batch[0], key=len))
max_len_y = len(max(batch[1], key=len))
max_len = min(max(max_len_x, max_len_y), max_seq_len)
padded_batch = ([], [])
# If the length of the current session is longer than max len, we remove the part that is too much
for i in range(len(batch[0])):
x = batch[0][i]
y = batch[1][i]
if len(x) > max_len:
x = x[:max_len]
y = y[:max_len - 1] + [eoq_symbol]
else:
padding = [pad_symbol for j in range(max_len - len(x))]
x += padding
y += padding
padded_batch[0].append(x)
padded_batch[1].append(y)
# Return max_len to keep track of this, to be able to adapt model
return padded_batch, max_len
def add_padding_and_sort(batch, eoq_symbol, pad_symbol, max_seq_len):
    sorted_batch = sorted(batch, key=len)  # list.sort() returns None, so use sorted()
    return add_padding(sorted_batch, eoq_symbol, pad_symbol, max_seq_len)
def read_vocab_lookup(vocab_file):
    vocab_shifted = read_token_lookup(vocab_file)
    return dict((v, k) for k, v in vocab_shifted.items())  # iteritems() is Python 2 only
def read_token_lookup(vocab_file):
assert os.path.isfile(vocab_file)
    vocab = pickle.load(open(vocab_file, "rb"))  # pickle files must be opened in binary mode
# vocab = dict([(x[0], x[1]) for x in loaded_file])
# Check consistency
if '<unk>' not in vocab:
vocab['<unk>'] = 0
if '</q>' not in vocab:
vocab['</q>'] = 1
if '</s>' not in vocab:
vocab['</s>'] = 2
if '</p>' not in vocab:
vocab['</p>'] = 3
logger.info("INFO - Successfully loaded vocabulary dictionary %s." % vocab_file)
logger.info("INFO - Vocabulary contains %d words" % len(vocab))
return vocab
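# Hedged illustration of add_padding (toy token ids; eoq=1, pad=2 as in the defaults):
# batch = ([[5, 6, 1], [7, 1]], [[6, 1, 2], [1, 2]])
# add_padding(batch)  # pads the shorter pair to length 3 with pad_symbol
# -> (([[5, 6, 1], [7, 1, 2]], [[6, 1, 2], [1, 2, 2]]), 3)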
|
[
"lingxiao@seas.upenn.edu"
] |
lingxiao@seas.upenn.edu
|
eae14d10395cebb9005075be962dbccde8e96453
|
d125c002a6447c3f14022b786b07712a7f5b4974
|
/tests/bugs/core_6496_test.py
|
83add6fecce024bf4fe791101f323a6d8fe430ef
|
[
"MIT"
] |
permissive
|
FirebirdSQL/firebird-qa
|
89d5b0035071f9f69d1c869997afff60c005fca9
|
cae18186f8c31511a7f68248b20f03be2f0b97c6
|
refs/heads/master
| 2023-08-03T02:14:36.302876
| 2023-07-31T23:02:56
| 2023-07-31T23:02:56
| 295,681,819
| 3
| 2
|
MIT
| 2023-06-16T10:05:55
| 2020-09-15T09:41:22
|
Python
|
UTF-8
|
Python
| false
| false
| 934
|
py
|
#coding:utf-8
"""
ID: issue-6726
ISSUE: 6726
TITLE: string_to_datetime and '\\0' symbol
DESCRIPTION:
    ascii_char(0) was allowed to be concatenated with a string and then passed to cast(... as timestamp)
up to 4.0.0.1227 (29-09-2018), and is forbidden since 4.0.0.1346 (17.12.2018).
FB 3.x allows this character to be used and issues timestamp instead of error.
JIRA: CORE-6496
FBTEST: bugs.core_6496
"""
import pytest
from firebird.qa import *
db = db_factory()
test_script = """
set heading off;
select cast('5.3.2021 01:02:03.1234' || ascii_char(0) as timestamp) from rdb$database;
"""
act = isql_act('db', test_script)
expected_stderr = """
Statement failed, SQLSTATE = 22009
Invalid time zone region:
"""
@pytest.mark.version('>=4.0')
def test_1(act: Action):
act.expected_stderr = expected_stderr
act.execute()
assert act.clean_stderr == act.clean_expected_stderr
|
[
"pcisar@ibphoenix.cz"
] |
pcisar@ibphoenix.cz
|
3f6b0953d0bf05f639da4bc89ab48e385ff3d3df
|
ec1deb682fb96a1f937f2fca5f161aa951462876
|
/pythonTextBook/exercises/classes/ex-9.6.py
|
e5f26b58f5dd3a61052ca7291d86c4b458d68fef
|
[] |
no_license
|
AnatoliKosarev/Python-beginner-course--Teclado-
|
31d82f5e9a1f39e2970323bed9de1fd539990565
|
fa91199938d6975b5874341585343566caaf3600
|
refs/heads/main
| 2023-06-30T12:14:33.779827
| 2021-07-24T11:16:19
| 2021-07-24T11:16:19
| 376,371,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
from HelloWorld.pythonTextBook.exercises.classes.exNineFour import Restaurant
class IceCreamStand(Restaurant):
def __init__(self, restaurant_name, cuisine_type):
super().__init__(restaurant_name, cuisine_type)
self.flavours = ["vanilla", "chocolate", "strawberry"]
def show_flavours(self):
print(", ".join(self.flavours))
if __name__ == "__main__":
print(__name__)
ice = IceCreamStand("Bob's", "ice cream")
ice.show_flavours()
ice.describe_restaurant()
|
[
"anatoli.kosarev@gmail.com"
] |
anatoli.kosarev@gmail.com
|
7dfd66da43d55823453a16f8a9f215ffd604f136
|
17e0b82a3481dc857be371e3189f2d5ec158111a
|
/src/service/connections/__init__.py
|
f54aa4f85d6c79e0450907ffa9a4baebf3b8b502
|
[
"Apache-2.0"
] |
permissive
|
deeso/service-utilities
|
582cc34cb944734ac16b42dd57815a44e59f9f0e
|
9a3ebdcad9a1d0049a23e3486d7ea99a6d08f81a
|
refs/heads/master
| 2021-05-14T14:53:37.536862
| 2018-01-23T11:37:12
| 2018-01-23T11:53:16
| 115,980,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
from .base_connection import ConnectionFactory
from .kombu_connection import KombuConnection
from .mongo_connection import MongoConnection
from .socket_connection import TCPConnection, UDPConnection
from .jsonline_connection import JsonTCPLineConnection, JsonUDPLineConnection
ConnectionFactory.register_connection(KombuConnection)
ConnectionFactory.register_connection(MongoConnection)
ConnectionFactory.register_connection(TCPConnection)
ConnectionFactory.register_connection(UDPConnection)
|
[
"adam.pridgen@thecoverofnight.com"
] |
adam.pridgen@thecoverofnight.com
|
cf2f2d7f88dda9d9c3caa6fbe711331cf4e416d8
|
019fd2c29b8239d7b0a3906cfbdddfd440362417
|
/datacatalog/google/cloud/datacatalog_v1beta1/proto/policytagmanagerserialization_pb2_grpc.py
|
74a7fdcfa5f77b46313389513bcdf64342f64c96
|
[
"Apache-2.0"
] |
permissive
|
tswast/google-cloud-python
|
1334d26cdb994293f307d889251d7daef5fcb826
|
d897d56bce03d1fda98b79afb08264e51d46c421
|
refs/heads/master
| 2021-06-10T17:40:06.968584
| 2020-01-11T17:41:29
| 2020-01-11T17:41:29
| 58,775,221
| 1
| 1
|
Apache-2.0
| 2019-04-10T17:09:46
| 2016-05-13T22:06:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,846
|
py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.datacatalog_v1beta1.proto import (
policytagmanagerserialization_pb2 as google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_policytagmanagerserialization__pb2,
)
class PolicyTagManagerSerializationStub(object):
"""Policy tag manager serialization API service allows clients to manipulate
their taxonomies and policy tags data with serialized format.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ImportTaxonomies = channel.unary_unary(
"/google.cloud.datacatalog.v1beta1.PolicyTagManagerSerialization/ImportTaxonomies",
request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_policytagmanagerserialization__pb2.ImportTaxonomiesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_policytagmanagerserialization__pb2.ImportTaxonomiesResponse.FromString,
)
self.ExportTaxonomies = channel.unary_unary(
"/google.cloud.datacatalog.v1beta1.PolicyTagManagerSerialization/ExportTaxonomies",
request_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_policytagmanagerserialization__pb2.ExportTaxonomiesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_policytagmanagerserialization__pb2.ExportTaxonomiesResponse.FromString,
)
class PolicyTagManagerSerializationServicer(object):
"""Policy tag manager serialization API service allows clients to manipulate
their taxonomies and policy tags data with serialized format.
"""
def ImportTaxonomies(self, request, context):
"""Imports all taxonomies and their policy tags to a project as new
taxonomies.
This method provides a bulk taxonomy / policy tag creation using nested
proto structure.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ExportTaxonomies(self, request, context):
"""Exports all taxonomies and their policy tags in a project.
This method generates SerializedTaxonomy protos with nested policy tags
that can be used as an input for future ImportTaxonomies calls.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_PolicyTagManagerSerializationServicer_to_server(servicer, server):
rpc_method_handlers = {
"ImportTaxonomies": grpc.unary_unary_rpc_method_handler(
servicer.ImportTaxonomies,
request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_policytagmanagerserialization__pb2.ImportTaxonomiesRequest.FromString,
response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_policytagmanagerserialization__pb2.ImportTaxonomiesResponse.SerializeToString,
),
"ExportTaxonomies": grpc.unary_unary_rpc_method_handler(
servicer.ExportTaxonomies,
request_deserializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_policytagmanagerserialization__pb2.ExportTaxonomiesRequest.FromString,
response_serializer=google_dot_cloud_dot_datacatalog__v1beta1_dot_proto_dot_policytagmanagerserialization__pb2.ExportTaxonomiesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.datacatalog.v1beta1.PolicyTagManagerSerialization",
rpc_method_handlers,
)
server.add_generic_rpc_handlers((generic_handler,))
|
[
"noreply@github.com"
] |
tswast.noreply@github.com
|
e9c38467e2a787714831f7864bb435bcedfaeca4
|
3e8acb5749ff67add92a50718ed44f5e80b589b1
|
/app/helpers/sqlalchemy_helpers.py
|
edcae3444a81f58899f7e98b474581afeacd69a1
|
[] |
no_license
|
jeffthemaximum/mtinator
|
a43b0dc93575742b161373a460e0f883da1c0f35
|
a15eccf2acc727c32d8c0e0b31922f27edd3e386
|
refs/heads/master
| 2022-10-09T23:15:12.092352
| 2019-09-09T18:37:22
| 2019-09-09T18:37:22
| 207,014,198
| 0
| 0
| null | 2022-09-23T22:27:43
| 2019-09-07T19:09:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,239
|
py
|
import datetime
from app.models import Line, Status
def get_or_create(db_session, model, **kwargs):
created = False
instance = db_session.query(model).filter_by(**kwargs).first()
if instance:
return instance, created
else:
instance = model(**kwargs)
db_session.add(instance)
db_session.commit()
created = True
return instance, created
def update_line_and_status(line_name, status_name, db):
line, created = get_or_create(db.session, Line, name=line_name)
previous_status = Status.query.filter_by(
line_id=line.id).order_by(Status.create_time.desc()).first()
status = Status(name=status_name, line_id=line.id)
db.session.add(status)
db.session.commit()
log_status_change(line, status, previous_status)
cache_status_change(line, status, db)
line_name = line.name
status_name = status.name
print(f"{line_name} {status_name}")
return line, status
def log_status_change(line, status, previous_status):
if previous_status is not None:
line_name = line.name
log = None
if (previous_status.name == 'not delayed' and status.name == 'delayed'):
log = f"Line {line_name} is experiecing delays"
elif (previous_status.name == 'delayed' and status.name == 'not delayed'):
log = f"Line {line_name} is now recovered"
if log is not None:
print(log)
def cache_status_change(line, status, db):
if status is not None:
status_name = status.name
previous_status = Status.query.filter(
Status.create_time < status.create_time, Status.line_id == status.line_id).order_by(Status.create_time.desc()).first()
should_cache = (
status_name == 'delayed' or
(
status_name == 'not delayed' and
previous_status is not None and
previous_status.name == 'delayed'
)
)
if should_cache is True:
diff = status.create_time - previous_status.create_time
diff_seconds = diff.total_seconds()
line.down_time += diff_seconds
db.session.add(line)
db.session.commit()
|
[
"frey.maxim@gmail.com"
] |
frey.maxim@gmail.com
|
62f22b123e3f5dd0e4c74c13925b4c37009df1a8
|
33a50bb13812090a36257078522b798762978c66
|
/top/api/rest/JushitaJdpUserAddRequest.py
|
3936dcb28e2258dc2e002dc64d4199ecc712998e
|
[] |
no_license
|
aa3632840/quanlin
|
52ac862073608cd5b977769c14a7f6dcfb556678
|
2890d35fa87367d77e295009f2d911d4b9b56761
|
refs/heads/master
| 2021-01-10T22:05:14.076949
| 2014-10-25T02:28:15
| 2014-10-25T02:28:15
| 23,178,087
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
'''
Created by auto_sdk on 2014-09-08 16:48:02
'''
from top.api.base import RestApi
class JushitaJdpUserAddRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.rds_name = None
def getapiname(self):
return 'taobao.jushita.jdp.user.add'
|
[
"262708239@qq.com"
] |
262708239@qq.com
|
be29efc36f6a969ab3bd214e43536e35705e975e
|
9b722ca41671eb2cea19bac5126d0920639261bd
|
/.history/app_20201122215937.py
|
6289174e858ea6631f102042e9c2fb4ba21ac8be
|
[] |
no_license
|
thawalk/db_flask_server
|
7928fd481f99d30bdccc60d97f02db78324cfdbe
|
cd55f1c9bf84c734457ee02d9f64a6833e295fad
|
refs/heads/master
| 2023-01-25T02:40:19.097457
| 2020-12-06T07:45:50
| 2020-12-06T07:45:50
| 314,229,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,946
|
py
|
import json
from flask import Flask, jsonify, url_for, request, redirect,Response,Request
# from flask_pymongo import PyMongo
import pymongo
from bson.json_util import dumps
import mysql.connector
from werkzeug.serving import run_simple
import os
from dotenv import load_dotenv
import datetime
import time
app = Flask(__name__)
# mongo_url = os.getenv("mongo_url")
# dbname = os.getenv("database_name")
# mongo_store = MongoClient(mongo_url)
# metadata = mongo_store.dbname.metadata
test_collection='test_collection'
# sample='user_collection'
mongo = pymongo.MongoClient('mongodb://54.211.223.244:27017/?readPreference=primary&appname=MongoDB%20Compass&ssl=false')
mongo_db = pymongo.database.Database(mongo, 'test')  # renamed: `db` is reused for the MySQL connection below
metadata_col = pymongo.collection.Collection(mongo_db, 'test_collection')
# impt = dumps(list(col.find({"asin":"1603420304"})))
# print(impt)
# list_data = list(data)
# data_print = dumps(list_data)
# print(data_print)
# data = mongo_store.dbname.sample
# print("testing metadata find")
# print(dumps(list(metadata.find().limit(10))))
db = mysql.connector.connect(
host ='52.87.158.130',
user = 'root',
password = '',
database = 'reviews'
)
cur = db.cursor()
# cur.execute("SELECT asin from kindle_reviews group by asin order by avg(overall) desc limit 9 ")
# print(cur.fetchall())
# print("above fetch all")
@app.route('/',methods=["GET"])
def api_root():
data = {
        'message': 'Welcome to our website, where reviews are our number one priority'
}
js = json.dumps(data)
response = Response(js, status=200, mimetype='application/json')
return response
#returns list of categories
@app.route('/categories', methods = ['GET'])
def get_categories():
    categories = []  # TODO: populate from the metadata collection
    data = {'categories': categories}  # `data` was previously undefined here
    js = json.dumps(data)
    response = Response(js, status=200, mimetype='application/json')
    return response
#Search for book using title, price or asin
@app.route('/search', methods=['GET'])
def search_book():
    js = dumps(list(metadata_col.find().limit(10)))  # bson dumps already returns a JSON string
    print(js)
    response = Response(js, status=200, mimetype='application/json')
    return response
# book = []
# if title in request.args:
# book = metadata.find({'title': title})
# elif price in request.args:
# book = metadata.find({'price':price})
# elif asin in request.args:
# book = metadata.find({'asin':asin})
# if len(book) > 0:
# msg = {'status': 200, 'message': 'book(s) successfully found', 'books': book}
# else :
# msg = {'status': 500, 'message': 'no books found with the following searches'}
# return jsonify(msg)
# @app.route('/review', methods=['POST'])
# def add_review():
# if not request.json or not request.json['asin'] or type(request.json['asin']) != str or not request.json['overall'] or not request.json['reviewText'] or type(request.json['reviewText']) != str or not request.json['reviewTime'] or type(request.json['reviewTime']) != str or not request.json['reviewerID'] or type(request.json['reviewerID']) != str or not request.json['reviewerName'] or type(request.json['reviewerName']) != str or not request.json['summary'] or type(request.json['summary']) != str or not request.json['unixReviewTime'] or type(request.json['unixReviewTime']) != int :
# return 'invalid request msg', 404
# txt = "INSERT INTO 'kindle_reviews' ('id', 'asin', 'overall', 'reviewText', 'reviewTime', 'reviewerID', 'reviewerName', 'summary', 'unixReviewTime') VALUES (%s)"
# values = (None, request.json['asin'], request.json['overall'], request.json['reviewText'], request.json['reviewTime'], request.json['reviewerID'], request.json['reviewerName'], request.json['summary'], request.json['unixReviewTime'])
# cur.execute(txt, values)
# return 'successfully uploaded new review', 200
# @app.route()  # incomplete decorator left over in this .history snapshot; commented out to keep the module importable
if __name__ == '__main__':
# app.run(host="0.0.0.0", port=80)
app.run(debug=True)
|
[
"akmal_hakim_teo@hotmail.com"
] |
akmal_hakim_teo@hotmail.com
|
b356dcb3f63d9429c49159d66a995e1973602e26
|
53e90091d10a2454e14a02ecc689e355ac2a7cc1
|
/book/pylisting/code_stemmer_indexing.py
|
dfdae82f171252f3d61a071abec893db065ea0a1
|
[] |
no_license
|
dougalg/nltk.github.com
|
aac74cf03d17475adc177ac08691359cb1f4adb6
|
9a04ac5264f5ef08d87d6b920580c9160042f1a0
|
refs/heads/master
| 2020-12-07T17:15:15.894232
| 2014-04-21T14:11:17
| 2014-04-21T14:11:17
| 18,965,594
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
# Natural Language Toolkit: code_stemmer_indexing
import nltk
class IndexedText(object):
    def __init__(self, stemmer, text):
        self._text = text
        self._stemmer = stemmer
        self._index = nltk.Index((self._stem(word), i)
                                 for (i, word) in enumerate(text))
    def concordance(self, word, width=40):
        key = self._stem(word)
        wc = width // 4  # words of context; integer division so slices get int indices
        for i in self._index[key]:
            lcontext = ' '.join(self._text[i-wc:i])
            rcontext = ' '.join(self._text[i:i+wc])
            ldisplay = '%*s' % (width, lcontext[-width:])
            rdisplay = '%-*s' % (width, rcontext[:width])
            print(ldisplay, rdisplay)  # was a Python 2 print statement
    def _stem(self, word):
        return self._stemmer.stem(word).lower()
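# Usage sketch (mirrors the NLTK book example this listing comes from; assumes
# the webtext corpus has been downloaded via nltk.download('webtext')):
# porter = nltk.PorterStemmer()
# grail = nltk.corpus.webtext.words('grail.txt')
# text = IndexedText(porter, grail)
# text.concordance('lie')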
|
[
"stevenbird1@gmail.com"
] |
stevenbird1@gmail.com
|
bf2bdb42b6618a874bea7a69332aef8708faac3e
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/xNxZx7DDr6BumJLaB_18.py
|
8aa7e4f2ef2b69ac807c4f99c801cd1f3a29e0e7
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
"""
**Mubashir** wants to remove numbers from a given string!
Help him by fixing the code in the code tab to pass this challenge. Look at
the examples below to get an idea of what the function should do.
### Examples
remove_numbers("mubashir1") ➞ "mubashir"
remove_numbers("12ma23tt") ➞ "matt"
remove_numbers("e1d2a3b4i5t6") ➞ "edabit"
### Notes
* **READ EVERY WORD CAREFULLY, CHARACTER BY CHARACTER!**
* Don't overthink this challenge; it's not supposed to be hard.
"""
def remove_numbers(string):
return ''.join(i for i in string if i.isalpha())
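# Quick checks against the examples in the docstring above:
assert remove_numbers("mubashir1") == "mubashir"
assert remove_numbers("12ma23tt") == "matt"
assert remove_numbers("e1d2a3b4i5t6") == "edabit"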
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
2bbe287656c0e9eb6579f80e9e43e6a9663f4f51
|
4bf5f83a8e5cd4c3ee700569e4a6f07a87dd209c
|
/students/13th/hyungukkim/project_westagram/user/models.py
|
2bf6fc0ee085860322a503e233755d2382c454b0
|
[] |
no_license
|
gledong12/westagram-backend
|
6e066f4c741aa19df13224ba530b0d9f43a405f7
|
1842f065c599885ad5dcb9ec5fb267eaf3295872
|
refs/heads/master
| 2023-03-11T20:32:47.055525
| 2021-03-04T01:04:31
| 2021-03-04T01:04:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
from django.db import models
class Account(models.Model):
name = models.CharField(max_length=45)
email = models.CharField(max_length=45)
phone = models.CharField(max_length=45)
password = models.CharField(max_length=200)
followers = models.IntegerField(default=0)
followees = models.IntegerField(default=0)
relations = models.ManyToManyField('self', symmetrical=False, through='Relation', related_name='+')
class Relation(models.Model):
from_account = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='relation_from_account')
to_account = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='relation_to_account')
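# Hedged usage sketch (illustrative values; assumes migrations have been applied):
# a = Account.objects.create(name="a", email="a@x.com", phone="010", password="pw")
# b = Account.objects.create(name="b", email="b@x.com", phone="011", password="pw")
# Relation.objects.create(from_account=a, to_account=b)  # a follows b
# a.relations.all()  # accounts reachable from `a` through Relation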
|
[
"lsheon93@gmail.com"
] |
lsheon93@gmail.com
|
22b9c617a333f0820997c188219ab0230f75f0bb
|
209a7a4023a9a79693ec1f6e8045646496d1ea71
|
/COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/pandas/tests/util/test_doc.py
|
c9b12b5c490713603c6c109debfbd33547c8c5b0
|
[
"MIT"
] |
permissive
|
anzhao920/MicrosoftProject15_Invictus
|
5e2347015411bbffbdf0ceb059df854661fb240c
|
15f44eebb09561acbbe7b6730dfadf141e4c166d
|
refs/heads/main
| 2023-04-16T13:24:39.332492
| 2021-04-27T00:47:13
| 2021-04-27T00:47:13
| 361,913,170
| 0
| 0
|
MIT
| 2021-04-26T22:41:56
| 2021-04-26T22:41:55
| null |
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
from textwrap import dedent
from pandas.util._decorators import doc
@doc(method="cumsum", operation="sum")
def cumsum(whatever):
"""
This is the {method} method.
It computes the cumulative {operation}.
"""
@doc(
cumsum,
dedent(
"""
Examples
--------
>>> cumavg([1, 2, 3])
2
"""
),
method="cumavg",
operation="average",
)
def cumavg(whatever):
pass
@doc(cumsum, method="cummax", operation="maximum")
def cummax(whatever):
pass
@doc(cummax, method="cummin", operation="minimum")
def cummin(whatever):
pass
def test_docstring_formatting():
docstr = dedent(
"""
This is the cumsum method.
It computes the cumulative sum.
"""
)
assert cumsum.__doc__ == docstr
def test_docstring_appending():
docstr = dedent(
"""
This is the cumavg method.
It computes the cumulative average.
Examples
--------
>>> cumavg([1, 2, 3])
2
"""
)
assert cumavg.__doc__ == docstr
def test_doc_template_from_func():
docstr = dedent(
"""
This is the cummax method.
It computes the cumulative maximum.
"""
)
assert cummax.__doc__ == docstr
def test_inherit_doc_template():
docstr = dedent(
"""
This is the cummin method.
It computes the cumulative minimum.
"""
)
assert cummin.__doc__ == docstr
|
[
"ana.kapros@yahoo.ro"
] |
ana.kapros@yahoo.ro
|
1496f4e20eb8bd4117fbf6f91248ff6bc0e67fb0
|
9d3b264a75264a28bafa0d22889c0bf6c429bbf4
|
/fluent_contents/tests/utils.py
|
2d9bed0d2676c3d7b2df627df2b062d241e50491
|
[
"Apache-2.0"
] |
permissive
|
hexenxp14/django-fluent-contents
|
a799b3864c826edf9ed67f4132589f73a4cf7807
|
613a4cd51ac201b86adfd5434e08f9a56eebfa2d
|
refs/heads/master
| 2020-12-30T19:22:42.340848
| 2014-10-30T12:41:13
| 2014-10-30T12:41:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,536
|
py
|
from __future__ import print_function
from future.builtins import str
from functools import wraps
from django.conf import settings, UserSettingsHolder
from django.contrib.auth.models import User
from django.core.management import call_command
from django.contrib.sites.models import Site
from django.db.models import loading
from django.template.loaders import app_directories
from django.test import TestCase
from django.utils.importlib import import_module
import os
class AppTestCase(TestCase):
"""
Tests for URL resolving.
"""
user = None
install_apps = (
'fluent_contents.tests.testapp',
)
@classmethod
def setUpClass(cls):
if cls.install_apps:
# When running this app via `./manage.py test fluent_pages`, auto install the test app + models.
run_syncdb = False
for appname in cls.install_apps:
if appname not in settings.INSTALLED_APPS:
print('Adding {0} to INSTALLED_APPS'.format(appname))
settings.INSTALLED_APPS += (appname,)
run_syncdb = True
# Flush caches
testapp = import_module(appname)
loading.cache.loaded = False
app_directories.app_template_dirs += (
os.path.join(os.path.dirname(testapp.__file__), 'templates'),
)
print(appname, os.path.join(os.path.dirname(testapp.__file__), 'templates'))
if run_syncdb:
call_command('syncdb', verbosity=0) # may run south's overlaid version
# Create basic objects
# 1.4 does not create site automatically with the defined SITE_ID, 1.3 does.
Site.objects.get_or_create(id=settings.SITE_ID, defaults=dict(domain='django.localhost', name='django at localhost'))
(cls.user, _) = User.objects.get_or_create(is_superuser=True, is_staff=True, username="admin")
def assert200(self, url, msg_prefix=''):
"""
        Test that a URL exists.
"""
if msg_prefix:
msg_prefix += ": "
self.assertEquals(self.client.get(url).status_code, 200, str(msg_prefix) + u"Page at {0} should be found.".format(url))
def assert404(self, url, msg_prefix=''):
"""
        Test that a URL does not exist.
"""
if msg_prefix:
msg_prefix += ": "
self.assertEquals(self.client.get(url).status_code, 404, str(msg_prefix) + u"Page at {0} should return 404.".format(url))
try:
from django.test.utils import override_settings # Django 1.4
except ImportError:
class override_settings(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
self.wrapped = settings._wrapped
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import TransactionTestCase
if isinstance(test_func, type) and issubclass(test_func, TransactionTestCase):
original_pre_setup = test_func._pre_setup
original_post_teardown = test_func._post_teardown
def _pre_setup(innerself):
self.enable()
original_pre_setup(innerself)
def _post_teardown(innerself):
original_post_teardown(innerself)
self.disable()
test_func._pre_setup = _pre_setup
test_func._post_teardown = _post_teardown
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def enable(self):
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
settings._wrapped = override
def disable(self):
settings._wrapped = self.wrapped
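# Hedged usage sketch: this fallback mirrors Django 1.4's own API, so either
# form works on both Django versions:
#
#     @override_settings(DEBUG=True)
#     def test_debug_page(self): ...
#
#     with override_settings(SITE_ID=2):
#         ...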
|
[
"vdboor@edoburu.nl"
] |
vdboor@edoburu.nl
|
4346489444cdc929ef4cbf646a290ff6bb17a126
|
d17a8870ff8ac77b82d0d37e20c85b23aa29ca74
|
/lite/tests/unittest_py/op/test_generate_proposals_op.py
|
e2f0d972640dd4077b6189d8870238550a6c648a
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle-Lite
|
4ab49144073451d38da6f085a8c56822caecd5b2
|
e241420f813bd91f5164f0d9ee0bc44166c0a172
|
refs/heads/develop
| 2023-09-02T05:28:14.017104
| 2023-09-01T10:32:39
| 2023-09-01T10:32:39
| 104,208,128
| 2,545
| 1,041
|
Apache-2.0
| 2023-09-12T06:46:10
| 2017-09-20T11:41:42
|
C++
|
UTF-8
|
Python
| false
| false
| 5,294
|
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import numpy as np
from functools import partial
import hypothesis.strategies as st
class TestGenerateProposalsOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.Host,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
in_shape = draw(
st.lists(
st.integers(
min_value=16, max_value=32),
min_size=4,
max_size=4))
in_shape[0] = 1
anchor_sizes = draw(
st.sampled_from([[32.0], [32.0, 64.0], [64.0, 128.0],
[32.0, 64.0, 128.0]]))
aspect_ratios = draw(
st.sampled_from([[1.0], [1.0, 2.0], [0.5, 1.0, 2.0]]))
variances = draw(
st.lists(
st.floats(
min_value=0.5, max_value=1.5),
min_size=4,
max_size=4))
stride = draw(
st.sampled_from([[16.0, 16.0], [24.0, 24.0], [16.0, 24.0]]))
num_anchors = len(anchor_sizes) * len(aspect_ratios)
anchor_generator_op = OpConfig(
type="anchor_generator",
inputs={"Input": ["input_data"]},
outputs={
"Anchors": ["anchors_data"],
"Variances": ["variance_data"]
},
attrs={
"anchor_sizes": anchor_sizes,
"aspect_ratios": aspect_ratios,
"stride": stride,
"variances": variances,
"offset": 0.5
})
scale = draw(st.floats(min_value=1, max_value=1))
scores_shape = [in_shape[0], num_anchors, in_shape[2], in_shape[3]]
bbox_delta_shape = [
scores_shape[0], scores_shape[1] * 4, scores_shape[2],
scores_shape[3]
]
pre_nms_topN = draw(st.integers(min_value=2000, max_value=8000))
post_nms_topN = draw(st.integers(min_value=1000, max_value=1500))
nms_thresh = draw(st.floats(min_value=0.5, max_value=0.8))
min_size = draw(st.floats(min_value=2, max_value=4))
eta = draw(st.floats(min_value=0.5, max_value=1.5))
def generate_im_info(*args, **kwargs):
return np.array(
[in_shape[2] * stride[0], in_shape[3] * stride[1],
scale]).astype(np.float32)
generate_proposals_op = OpConfig(
type="generate_proposals",
inputs={
"Scores": ["scores_data"],
"BboxDeltas": ["bbox_delta_data"],
"ImInfo": ["im_info_data"],
"Anchors": ["anchors_data"],
"Variances": ["variance_data"]
},
outputs={
"RpnRois": ["rpn_rois_data"],
"RpnRoiProbs": ["rpn_rois_probs_data"],
"RpnRoisNum": ["rpn_rois_num_data"]
},
attrs={
"pre_nms_topN": pre_nms_topN,
"post_nms_topN": post_nms_topN,
"nms_thresh": nms_thresh,
"min_size": min_size,
"eta": eta
})
program_config = ProgramConfig(
ops=[anchor_generator_op, generate_proposals_op],
weights={},
inputs={
"input_data": TensorConfig(shape=in_shape),
"scores_data": TensorConfig(shape=scores_shape),
"bbox_delta_data": TensorConfig(shape=bbox_delta_shape),
"im_info_data":
TensorConfig(data_gen=partial(generate_im_info))
},
outputs=[
"rpn_rois_data", "rpn_rois_probs_data", "rpn_rois_num_data"
])
return program_config
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["anchor_generator"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main(argv=[''])
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
f586fe3dbe958a601f1a99e6f4f461331e4eb4b8
|
a42c73c33f0ed093a57b077ee726eb60bd3a9410
|
/tests/res/mnist.py
|
5ad533e2b2b65691e6655eef9b540c700fd6ceb5
|
[] |
no_license
|
chriamue/mnistclassifier
|
81485e70bc6d94aeea5bb84ae66ac45fe5dbac51
|
1c02663f2444bbe195356b65a9ce4deecd763100
|
refs/heads/master
| 2020-04-14T05:04:57.706819
| 2019-01-07T09:44:11
| 2019-01-07T09:44:11
| 163,652,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 695
|
py
|
# source: https://github.com/pytorch/examples/blob/master/mnist/main.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class mnist(nn.Module):
def __init__(self, **kwargs):
super(mnist, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4*4*50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4*4*50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
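# Minimal smoke test (hedged: dummy input with the standard 1x28x28 MNIST shape):
if __name__ == "__main__":
    model = mnist()
    out = model(torch.randn(1, 1, 28, 28))
    print(out.shape)  # torch.Size([1, 10]): log-probabilities over the 10 digits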
|
[
"chriamue@gmail.com"
] |
chriamue@gmail.com
|
7f6038af767e1779c691a01c5838ef93949692f0
|
2beecbbcc8d3b21d6c536885dc10570bf20044bb
|
/week_four/media_clone/media_app/models.py
|
9d3dc421e33c69a41301b3d6ba06b11aa7a32289
|
[] |
no_license
|
MTaylorfullStack/python_april
|
05724dff121370f72b14beaf9737bcd39292111b
|
e15a496dc5ea3e0cb5f966000bfb7ad30c6bce28
|
refs/heads/master
| 2022-07-22T15:55:21.335557
| 2020-05-21T01:02:11
| 2020-05-21T01:02:11
| 255,476,577
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
from django.db import models
class User(models.Model):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Message_Post(models.Model):
message = models.CharField(max_length=255)
poster = models.ForeignKey(User, related_name="message_posts", on_delete=models.CASCADE)
likes = models.ManyToManyField(User, related_name="likes")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# Create your models here.
|
[
"mtaylor@codingdojo.com"
] |
mtaylor@codingdojo.com
|
81d7c0b15b03f6fd57c59e9e2efe51c989c26b7f
|
dc3c88f1fe5c80147e4c52ee6ec3136307ec9702
|
/copyPluginName/readMe_model.py
|
e68cca9deb5a4c208e6e7256e693996c1687b6a5
|
[] |
no_license
|
ypapax/all_sublime_plugins
|
062f9b9992a093a02e6b905c1329c681c8532034
|
8b10e471233bd6c2e77907cf5569b0ddccfc88f9
|
refs/heads/master
| 2021-01-15T21:10:08.029750
| 2015-08-16T06:32:51
| 2015-08-16T06:32:51
| 40,391,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
import re
import sys
sys.path.insert(0, '/Users/maks/Library/Application Support/Sublime Text 3/Packages/moveNearReplace')
import filer2
def read(self):
view = self.window.active_view()
filename = view.file_name()
data = filer2.read(filename)
return data
def windowPluginName(self):
data = read(self)
m = re.findall(r'class (.+)Command\(sublime_plugin', data)
pluginName = m[0]
return pluginName
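# Hedged illustration of the regex above (input string is made up):
# re.findall(r'class (.+)Command\(sublime_plugin',
#            'class CopyNameCommand(sublime_plugin.TextCommand):')
# -> ['CopyName'], so windowPluginName would return 'CopyName'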
|
[
"maxYefr@gmail.com"
] |
maxYefr@gmail.com
|
7902e49a5df27303eec60da89c20f7aefa4c720d
|
7db3916d8ac8a66a954d230e43bb74b37f81357c
|
/15day/06-用进程池创建进程.py
|
8f6ee1c366d1b43c8dca4fa6d5a3c44a7a884afe
|
[] |
no_license
|
2001128/2_1805
|
2fc96bc6f8e2afcd9d4743891ecd87b754c28cc8
|
b3d4bfab2703a7c6aa1c280669376efeab28cad1
|
refs/heads/master
| 2020-03-22T20:53:14.903808
| 2018-07-30T06:04:49
| 2018-07-30T06:04:49
| 140,639,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
from multiprocessing import Pool
import time
def work(msg):
    for i in range(10):
        time.sleep(1)
        print("hey hey hey %s" % msg)
p = Pool(3)  # the pool holds at most 3 worker processes
for i in range(6):
    p.apply_async(work, (i,))  # non-blocking
    # p.apply(work, (i,))  # blocking
    print("added one")
p.close()
p.join()
|
[
"335775879@qq.com"
] |
335775879@qq.com
|
d711b04956217dda83f8d47d7a2a5a5ee2d06a86
|
14e36010b98895e08bd9edfcbc60dce30cbfb82b
|
/oneflow/compatible_single_client_python/framework/local_blob.py
|
8983fd0ce3ae9d403eddf84b197066a33689b23e
|
[
"Apache-2.0"
] |
permissive
|
duzhanyuan/oneflow
|
a9719befbfe112a7e2dd0361ccbd6d71012958fb
|
c6b47a3e4c9b5f97f5bc9f60bc1401313adc32c5
|
refs/heads/master
| 2023-06-21T20:31:55.828179
| 2021-07-20T16:10:02
| 2021-07-20T16:10:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,242
|
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import numpy as np
from oneflow.compatible_single_client_python.framework import (
remote_blob as remote_blob_util,
)
import oneflow._oneflow_internal
import traceback
class LocalBlob(object):
# TODO(chengcheng): maybe not need LocalBlob.
def __init__(self, ndarray, is_dynamic):
self.ndarray_ = ndarray
self.is_dynamic_ = is_dynamic
@property
def is_dynamic(self):
return self.is_dynamic_
def ndarray_list(self):
print(
"WARNING:",
"LocalBlob.ndarray_list is deprecated, please use LocalBlob.numpy()\n",
traceback.format_stack()[-2],
)
return self.numpy_list()
def numpy_list(self):
return [self.numpy()]
def ndarray(self):
print(
"WARNING:",
"LocalBlob.ndarray is deprecated, please use LocalBlob.numpy()\n",
traceback.format_stack()[-2],
)
return self.numpy()
def numpy(self, parallel_id=None):
assert parallel_id is None or parallel_id == 0
return self.ndarray_
def parallel_num(self):
return 1
def __getattr__(self, attr):
return getattr(self.numpy(), attr)
def MakeLocalBlob4EagerBlob(eager_blob):
# TODO(chengcheng): refactor eager local blob.
assert isinstance(eager_blob, oneflow._oneflow_internal.EagerBlobTrait)
if isinstance(eager_blob, oneflow._oneflow_internal.EagerMirroredBlob):
assert eager_blob.numpy_size() == 1
return LocalBlob(eager_blob.numpy(), is_dynamic=eager_blob.is_dynamic,)
elif isinstance(eager_blob, oneflow._oneflow_internal.EagerConsistentBlob):
return LocalBlob(eager_blob.numpy(), is_dynamic=False)
else:
raise NotImplementedError
non_override_field = set(
[
"__class__",
"__doc__",
"__new__",
"__init__",
"__del__",
"__call__",
"__getattr__",
"__getattribute__",
"__setattr__",
"__delattr__",
"__dir__",
"__get__",
"__set__",
"__delete__",
]
)
def MakeBlobMethod(field_name):
def ConvertOtherArgs(args):
return [x.numpy() if isinstance(x, LocalBlob) else x for x in args]
return lambda self, *args: getattr(self.numpy(), field_name)(
*ConvertOtherArgs(args)
)
for field_name in dir(np.ndarray):
    if not field_name.startswith("__"):  # only forward dunder methods
        continue
    if field_name in non_override_field:
        continue
    if not hasattr(LocalBlob, field_name):
        setattr(LocalBlob, field_name, MakeBlobMethod(field_name))
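# Hedged illustration of the dunder forwarding installed above (toy values):
# lb = LocalBlob(np.array([1, 2]), is_dynamic=False)
# lb + lb  # dispatches to np.ndarray.__add__ via MakeBlobMethod -> array([2, 4])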
|
[
"noreply@github.com"
] |
duzhanyuan.noreply@github.com
|
60f4317f06b40db9362ee145924634045e727923
|
d3cac61f30d7a76eb61560ac54b0b8d11fd63a19
|
/src/model/model.py
|
33a6eee5e2b1a4916972b46a64914ee6f015bb93
|
[
"MIT"
] |
permissive
|
Lukeeeeee/FISDNN
|
a676f7f73b8f7b54386054daf320b6a683d89b02
|
49361770fe987337b16a296c00cfd6562a7e95ed
|
refs/heads/master
| 2021-08-24T00:59:46.587476
| 2017-12-07T10:32:58
| 2017-12-07T10:32:58
| 113,009,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
import tensorflow as tf
class Model(object):
standard_key_list = []
def __init__(self, config, sess_flag=False, data=None):
self.config = config
self.data = data
self.net = None
if sess_flag is True:
self.sess = tf.InteractiveSession()
def create_model(self, *args, **kwargs):
pass
def create_training_method(self, *args, **kwargs):
pass
def update(self, *args, **kwargs):
pass
def eval_tensor(self, *args, **kwargs):
pass
def predict(self, *args, **kwargs):
pass
def save_model(self, *args, **kwargs):
pass
def load_model(self, *args, **kwargs):
pass
@property
def var_list(self):
return self.net.all_params
|
[
"lukedong123@gmail.com"
] |
lukedong123@gmail.com
|
9cb2f1bdd1bdc428bbeab2a4bcd011661e134f04
|
bad44a92fb338260f9c077689d7fa5472526c3fe
|
/models/tensorflow/google_bert/optimization_test.py
|
4f2dcf133f1bc4d4531fc9b82432d149be054b21
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
microsoft/nnfusion
|
ebc4c06331b8e93dbf5e176e5ecd3382e322ff21
|
bd4f6feed217a43c9ee9be16f02fa8529953579a
|
refs/heads/main
| 2023-08-25T17:41:37.517769
| 2022-09-16T05:59:01
| 2022-09-16T05:59:01
| 252,069,995
| 872
| 157
|
MIT
| 2023-07-19T03:06:21
| 2020-04-01T04:15:38
|
C++
|
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import optimization
import tensorflow as tf
class OptimizationTest(tf.test.TestCase):
def test_adam(self):
with self.test_session() as sess:
w = tf.get_variable(
"w",
shape=[3],
initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
x = tf.constant([0.4, 0.2, -0.5])
loss = tf.reduce_mean(tf.square(x - w))
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
global_step = tf.train.get_or_create_global_step()
optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
for _ in range(100):
sess.run(train_op)
w_np = sess.run(w)
self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
if __name__ == "__main__":
tf.test.main()
|
[
"noreply@github.com"
] |
microsoft.noreply@github.com
|