blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4820edc7e76561ec54b8f59602904b85f5792be0 | a31c42e99785fca5793f08be8b9f312f75b01973 | /test/util_fixtures.py | a05b608a34de787645ddc0e85464fdf60f2ae32a | [
"MIT"
] | permissive | gxpjzbg/machin | 6389beb50fad096d740f8f15031073289b9925c7 | d10727b52d981c898e31cdd20b48a3d972612bb6 | refs/heads/master | 2023-05-03T21:51:56.631559 | 2021-05-21T20:42:10 | 2021-05-21T20:42:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | import torch as t
import pytest
from test.data.archive import Archive
from test.data.all import generate_all, get_all
@pytest.fixture()
def gpu(pytestconfig):
dev = pytestconfig.getoption("gpu_device")
if dev is not None and dev.startswith("cuda"):
return dev
pytest.skip(f"Requiring GPU but provided `gpu_device` is {dev}")
@pytest.fixture(params=["cpu", "gpu"])
def device(pytestconfig, request):
if request.param == "cpu":
return "cpu"
else:
dev = pytestconfig.getoption("gpu_device")
if dev is not None and dev.startswith("cuda"):
return dev
pytest.skip(f"Requiring GPU but provided `gpu_device` is {dev}")
@pytest.fixture(params=["float32", "float64"])
def dtype(pytestconfig, request):
if request.param == "float32":
return t.float32
return t.float64
@pytest.fixture()
def mp_tmpdir(tmpdir):
"""
For multiprocessing, sharing the same tmpdir across all processes
"""
return tmpdir.make_numbered_dir()
@pytest.fixture(scope="session")
def archives():
# prepare all test data archives
generate_all()
return get_all()
__all__ = ["gpu", "device", "dtype", "mp_tmpdir", "archives"]
| [
"hanhanmumuqq@163.com"
] | hanhanmumuqq@163.com |
1da31dae5b70c2f8a4ecb8853fee254dbf41d97b | 8adec48dfaee1cdfd6c7f4d2fb3038aa1c17bda6 | /WProf/build/masters/master.chromium.memory/master_gatekeeper_cfg.py | 539c07011afd193f9392d64309ec408591fb8b82 | [] | no_license | kusoof/wprof | ef507cfa92b3fd0f664d0eefef7fc7d6cd69481e | 8511e9d4339d3d6fad5e14ad7fff73dfbd96beb8 | refs/heads/master | 2021-01-11T00:52:51.152225 | 2016-12-10T23:51:14 | 2016-12-10T23:51:14 | 70,486,057 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from master import gatekeeper
from master import master_utils
# This is the list of the builder categories and the corresponding critical
# steps. If one critical step fails, gatekeeper will close the tree
# automatically.
# Note: don't include 'update scripts' since we can't do much about it when
# it's failing and the tree is still technically fine.
categories_steps = {
'': ['update'],
'testers': [
'base_unittests',
'browser_tests',
'cacheinvalidation_unittests',
'content_unittests',
'courgette_unittests',
'crypto_unittests',
'googleurl_unittests',
'ipc_tests',
'installer_util_unittests',
'jingle_unittests',
'media_unittests',
'mini_installer_test',
'nacl_integration',
'net_unittests',
'printing_unittests',
'remoting_unittests',
'sql_unittests',
'test_shell_tests',
'unit_tests',
],
'compile': ['compile']
}
exclusions = {
}
forgiving_steps = ['update_scripts', 'update']
def Update(config, active_master, c):
c['status'].append(gatekeeper.GateKeeper(
fromaddr=active_master.from_address,
categories_steps=categories_steps,
exclusions=exclusions,
relayhost=config.Master.smtp,
subject='buildbot %(result)s in %(projectName)s on %(builder)s, '
'revision %(revision)s',
extraRecipients=active_master.tree_closing_notification_recipients,
lookup=master_utils.FilterDomain(),
forgiving_steps=forgiving_steps,
public_html='../master.chromium/public_html',
sheriffs=['sheriff'],
tree_status_url=active_master.tree_status_url,
use_getname=True))
| [
"kusoof@kookaburra.(none)"
] | kusoof@kookaburra.(none) |
3de9065870753636844318616c7686d4aad6f0da | 0b358a0d64eb03655c030b36c0ae87880b153951 | /mmdet/models/detectors/maskformer.py | f7257d2547d1644fe9f677f8883223e1b992288c | [] | permissive | jshilong/DDQ | db05ff309d63316c62faa59b28c66d65eef973d1 | de9331e4579aaafab4d69e3a9a3c6638efc5392c | refs/heads/main | 2023-06-03T15:02:09.949907 | 2023-05-24T03:32:12 | 2023-05-24T03:32:12 | 498,974,099 | 199 | 6 | Apache-2.0 | 2022-06-02T05:01:53 | 2022-06-02T03:10:25 | null | UTF-8 | Python | false | false | 7,585 | py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
from mmdet.core import INSTANCE_OFFSET
from mmdet.core.visualization import imshow_det_bboxes
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class MaskFormer(SingleStageDetector):
r"""Implementation of `Per-Pixel Classification is
NOT All You Need for Semantic Segmentation
<https://arxiv.org/pdf/2107.06278>`_."""
def __init__(self,
backbone,
neck=None,
panoptic_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None):
super(SingleStageDetector, self).__init__(init_cfg=init_cfg)
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
panoptic_head.update(train_cfg=train_cfg)
panoptic_head.update(test_cfg=test_cfg)
self.panoptic_head = build_head(panoptic_head)
self.num_things_classes = self.panoptic_head.num_things_classes
self.num_stuff_classes = self.panoptic_head.num_stuff_classes
self.num_classes = self.panoptic_head.num_classes
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def forward_dummy(self, img, img_metas):
"""Used for computing network flops. See
`mmdetection/tools/analysis_tools/get_flops.py`
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[Dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
"""
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
outs = self.panoptic_head(x, img_metas)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_masks,
gt_semantic_seg,
gt_bboxes_ignore=None,
**kargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[Dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box.
gt_masks (list[BitmapMasks]): true segmentation masks for each box
used if the architecture supports a segmentation task.
gt_semantic_seg (list[tensor]): semantic segmentation mask for
images.
gt_bboxes_ignore (list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Defaults to None.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# add batch_input_shape in img_metas
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
losses = self.panoptic_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_masks,
gt_semantic_seg,
gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, **kwargs):
"""Test without augmentation."""
feat = self.extract_feat(img)
mask_results = self.panoptic_head.simple_test(feat, img_metas,
**kwargs)
results = []
for mask in mask_results:
result = {'pan_results': mask.detach().cpu().numpy()}
results.append(result)
return results
def aug_test(self, imgs, img_metas, **kwargs):
raise NotImplementedError
def onnx_export(self, img, img_metas):
raise NotImplementedError
def show_result(self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=13,
win_name='',
show=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (dict): The results.
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'.
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'.
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None.
thickness (int): Thickness of lines. Default: 2.
font_size (int): Font size of texts. Default: 13.
win_name (str): The window name. Default: ''.
wait_time (float): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`.
"""
img = mmcv.imread(img)
img = img.copy()
pan_results = result['pan_results']
# keep objects ahead
ids = np.unique(pan_results)[::-1]
legal_indices = ids != self.num_classes # for VOID label
ids = ids[legal_indices]
labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
segms = (pan_results[None] == ids[:, None, None])
# if out_file specified, do not show image in window
if out_file is not None:
show = False
# draw bounding boxes
img = imshow_det_bboxes(
img,
segms=segms,
labels=labels,
class_names=self.CLASSES,
bbox_color=bbox_color,
text_color=text_color,
mask_color=mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
return img
| [
"2392587229zsl@gmail.com"
] | 2392587229zsl@gmail.com |
356ad1ce285bcd5f67cdc1a4b41cdc2877e0f680 | 54857571461a579bed30cee27871aaa5fe396bcc | /nltk-0.9.7/src/nltk/wordnet/browse.py | 642456d6a2fad207fc1a725391e1ab038bfe1a19 | [] | no_license | ahmedBazaz/affective-text-classification | 78375182e800b39e0e309e8b469e273c0d9590f0 | 719e9b26e60863c620662564fb9cfeafc004777f | refs/heads/master | 2021-01-10T14:50:01.100274 | 2009-01-09T03:59:01 | 2009-01-09T03:59:01 | 48,296,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,094 | py | # Natural Language Toolkit: Wordnet Interface: Wordnet Text Mode Browser
#
# Copyright (C) 2001-2008 NLTK Project
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# Jussi Salmela <jtsalmela@users.sourceforge.net> (modifications)
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""Natural Language Toolkit: Wordnet Interface: Wordnet Text Mode Browser
See also the NLTK Wordnet Graphical Browser in nltk.wordnet.browser
"""
from textwrap import TextWrapper
from random import randint
from util import *
from dictionary import *
tw = TextWrapper(subsequent_indent=" ")
def show(synsets, index):
return "%d %s;" % (index, synsets[index][0])
def print_gloss(synsets, index):
print index, "\n".join(tw.wrap(synsets[index].gloss))
def print_all_glosses(synsets):
for index in range(len(synsets)):
print_gloss(synsets, index)
def print_all(synsets):
for index in range(len(synsets)):
print show(synsets, index),
print
def print_help():
print "="*60
print "Lookup a word by typing it and finishing with Enter."
print "Reserved words -- letters and numbers used as browser commands --"
print "can be searched by preceeding them with an asterisk *."
print
print "Words have numbered senses, pick a sense by typing a number."
print
print "Commands are a letter followed by Enter:"
print " d=down, u=up, g=gloss, s=synonyms, a=all-senses"
print " v=verbose, r=random, q=quit"
print
print "Choose POS with: N=nouns, V=verbs, J=adjectives, R=adverbs"
print "="*60
def new_word(word):
D = None
for pos,sec in ((N,"N"), (V,"V"), (ADJ,"J"), (ADV,"R")):
if word in pos:
if not D: D = pos
print sec,
print_all(pos[word])
if D: synsets = D[word]
else:
print "Word '%s' not found! Choosing a random word." % word
D = N
synsets = random_synset(D)
print "N",
print_all(N[synsets[0][0]])
return D, synsets
def random_synset(D):
return D[randint(0,len(D)-1)]
def browse(word=" ", index=0):
"""
Browse WordNet interactively, starting from the specified word, and
navigating the WordNet hierarchy to synonyms, hypernyms, hyponyms, and so on.
@type word: C{string}
@param word: the word to look up in WordNet
@type index: C{int}
@param index: the sense number of this word to use (optional)
"""
print "Wordnet browser (type 'h' for help)"
D, synsets = new_word(word)
while True:
if index >= len(synsets):
index = 0
input = ''
while input == '':
if synsets:
prompt = "%s_%d/%d>" % (synsets[index][0], index, len(synsets))
input = raw_input(prompt)
else:
input = raw_input("> ") # safety net
# word lookup
if len(input) > 1 and not input.isdigit():
if input[0] == "*":
word = input[1:]
else:
word = input.lower()
D, synsets = new_word(word)
index = 0
# sense selection
elif input.isdigit():
if int(input) < len(synsets):
index = int(input)
print_gloss(synsets, index)
else:
print "There are %d synsets" % len(synsets)
# more info
elif input == "a":
print_all(synsets)
elif input == "g":
print_gloss(synsets, index)
elif input == "v":
print_all_glosses(synsets)
elif input == "s":
print "Synonyms:", ' '.join(word for word in synsets[index])
# choose part-of-speech
elif input in "NVJR":
ind = "NVJR".index(input)
pos = [N, V, ADJ, ADV][ind]
s = ["noun", "verb", "adjective", "adverb"][ind]
if word in pos:
D = pos
synsets = D[word]
else:
print "No " + s + " sense found"
# navigation
elif input == "r":
synsets = random_synset(D)
elif input == "u":
try:
hypernyms = synsets[index][HYPERNYM]
hypernyms[0]
synsets = hypernyms
print_all(synsets)
index = 0
except IndexError:
print "Cannot go up"
elif input == "d":
try:
hyponyms = synsets[index][HYPONYM]
hyponyms[0]
synsets = hyponyms
print_all(synsets)
index = 0
except IndexError:
print "Cannot go down"
# miscellany
elif input == "h" or input == "?":
print_help()
elif input == "q":
print "Goodbye"
break
else:
print "Unrecognised command: %s" % input
print "Type 'h' for help"
def demo():
print_help()
print
browse()
if __name__ == '__main__':
demo()
__all__ = ["demo"]
| [
"tytung@6129d76e-ddfe-11dd-a37d-c9d1c40e0883"
] | tytung@6129d76e-ddfe-11dd-a37d-c9d1c40e0883 |
e088689ba503de3fb63bb4e6d818c3b31043f29f | 716501ff054d10f095879f986d74882778fa72c5 | /tests/plugins/test_openrectv.py | 6ad67ba62479db94f42434c339a3b6175a88477f | [
"BSD-2-Clause"
] | permissive | TheDrHax/streamlink | 017fdfd999277089a0c89a209ba4f64df9e54916 | 4dfd0d516fd8484438389518985e3b5131b7a253 | refs/heads/master | 2021-10-26T17:43:50.402219 | 2020-10-11T22:43:53 | 2021-10-25T19:10:01 | 223,461,822 | 4 | 0 | BSD-2-Clause | 2019-11-22T18:25:47 | 2019-11-22T18:25:46 | null | UTF-8 | Python | false | false | 382 | py | from streamlink.plugins.openrectv import OPENRECtv
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlOPENRECtv(PluginCanHandleUrl):
__plugin__ = OPENRECtv
should_match = [
'https://www.openrec.tv/live/DXRLAPSGTpx',
'https://www.openrec.tv/movie/JsDw3rAV2Rj',
]
should_not_match = [
'https://www.openrec.tv/',
]
| [
"gravyboat@users.noreply.github.com"
] | gravyboat@users.noreply.github.com |
e894a72950212d1db457da5dc7166e1c722016aa | 7f1e0158e70b69bfa353661bfb2eabda9ee5c56c | /tests/models/validators/v2_2_1/jsd_b3f79d3b45b98849d9180cc08018e.py | 92b7d3e181ba8446b8fba1d18d39842a7b3e03b1 | [
"MIT"
] | permissive | Jerbuck/dnacentersdk | 97fb11844410ec7ab49aec35a30979d6288a87fd | ef2adde6113e7a6acd28a287007eb470fa39d31f | refs/heads/master | 2023-07-31T13:43:01.108243 | 2021-09-14T17:41:19 | 2021-09-14T17:41:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,857 | py | # -*- coding: utf-8 -*-
"""Cisco DNA Center getTopologyDetails data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorB3F79D3B45B98849D9180Cc08018E(object):
"""getTopologyDetails request schema definition."""
def __init__(self):
super(JSONSchemaValidatorB3F79D3B45B98849D9180Cc08018E, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"response": {
"properties": {
"id": {
"type": "string"
},
"links": {
"items": {
"properties": {
"additionalInfo": {
"type": "object"
},
"endPortID": {
"type": "string"
},
"endPortIpv4Address": {
"type": "string"
},
"endPortIpv4Mask": {
"type": "string"
},
"endPortName": {
"type": "string"
},
"endPortSpeed": {
"type": "string"
},
"greyOut": {
"type": "boolean"
},
"id": {
"type": "string"
},
"linkStatus": {
"type": "string"
},
"source": {
"type": "string"
},
"startPortID": {
"type": "string"
},
"startPortIpv4Address": {
"type": "string"
},
"startPortIpv4Mask": {
"type": "string"
},
"startPortName": {
"type": "string"
},
"startPortSpeed": {
"type": "string"
},
"tag": {
"type": "string"
},
"target": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"nodes": {
"items": {
"properties": {
"aclApplied": {
"type": "boolean"
},
"additionalInfo": {
"type": "object"
},
"customParam": {
"properties": {
"id": {
"type": "string"
},
"label": {
"type": "string"
},
"parentNodeId": {
"type": "string"
},
"x": {
"type": "integer"
},
"y": {
"type": "integer"
}
},
"type": "object"
},
"dataPathId": {
"type": "string"
},
"deviceType": {
"type": "string"
},
"family": {
"type": "string"
},
"fixed": {
"type": "boolean"
},
"greyOut": {
"type": "boolean"
},
"id": {
"type": "string"
},
"ip": {
"type": "string"
},
"label": {
"type": "string"
},
"networkType": {
"type": "string"
},
"nodeType": {
"type": "string"
},
"order": {
"type": "integer"
},
"osType": {
"type": "string"
},
"platformId": {
"type": "string"
},
"role": {
"type": "string"
},
"roleSource": {
"type": "string"
},
"softwareVersion": {
"type": "string"
},
"tags": {
"items": {
"type": "string"
},
"type": "array"
},
"upperNode": {
"type": "string"
},
"userId": {
"type": "string"
},
"vlanId": {
"type": "string"
},
"x": {
"type": "integer"
},
"y": {
"type": "integer"
}
},
"type": "object"
},
"type": "array"
}
},
"type": "object"
},
"version": {
"type": "string"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
| [
"wastorga@altus.co.cr"
] | wastorga@altus.co.cr |
a87fd0a0d8ad9b8e45fb3ca27a5dd677c294d57c | fc5a4e7c181dc4371ee1ac3b5b077901ca91f889 | /src/python/WMCore/WMBS/MySQL/Jobs/AutoIncrementCheck.py | 0ab3a3d0ad3b761194f40c77fbd9380329f6efcb | [
"Apache-2.0"
] | permissive | vkuznet/WMCore | 4900a6320c52210aeaaafea90f1be1ef7bc932a6 | de110ccf6fc63ef5589b4e871ef4d51d5bce7a25 | refs/heads/master | 2023-09-01T12:19:26.134554 | 2023-08-24T09:50:59 | 2023-08-24T09:50:59 | 19,494,576 | 0 | 0 | Apache-2.0 | 2022-02-24T18:36:38 | 2014-05-06T13:08:55 | Python | UTF-8 | Python | false | false | 1,389 | py | #!/usr/bin/env python
"""
_AutoIncrementCheck_
AutoIncrement Check
Test to properly set the autoIncrement value
First, find the highest jobID either in wmbs_job or in wmbs_highest_job
Then reset AUTO_INCREMENT to point to that.
"""
__all__ = []
import logging
from WMCore.Database.DBFormatter import DBFormatter
class AutoIncrementCheck(DBFormatter):
"""
_AutoIncrmentCheck_
Check and properly set the auto_increment counter for wmbs_job
"""
highestSQL = """SELECT IFNULL(MAX(id), 0) FROM wmbs_job"""
currentSQL = """SELECT Auto_increment FROM information_schema.tables WHERE table_name='wmbs_job' AND table_schema=DATABASE()"""
alterSQL = "ALTER TABLE wmbs_job AUTO_INCREMENT = :value"
def execute(self, input = 0, conn = None, transaction = False):
"""
_execute_
"""
highest = self.dbi.processData(self.highestSQL, {}, conn = conn,
transaction = transaction)[0].fetchall()[0][0]
current = self.dbi.processData(self.currentSQL, {}, conn = conn,
transaction = transaction)[0].fetchall()[0][0]
value = max(input + 1, highest + 1)
if value > current:
self.dbi.processData(self.alterSQL, {'value': value},
conn = conn, transaction = transaction)
return
| [
"sfoulkes@4525493e-7705-40b1-a816-d608a930855b"
] | sfoulkes@4525493e-7705-40b1-a816-d608a930855b |
2636c033740d569cbff6da29f52c020bb929bcde | 89e6c3548fbdd06178aae712de1ff19004bc2faa | /my_django/contrib/gis/geos/linestring.py | 8b47fd894a494599e4513cb0eb13d014bf2dd4b1 | [
"BSD-3-Clause"
] | permissive | bhgv/ublog_git.hg.repo-django.python-engine | a3f3cdcbacc95ec98f022f9719d3b300dd6541d4 | 74cdae100bff5e8ab8fb9c3e8ba95623333c2d43 | refs/heads/master | 2020-03-23T01:04:07.431749 | 2018-07-25T12:59:21 | 2018-07-25T12:59:21 | 140,899,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,586 | py | from my_django.contrib.gis.geos.base import numpy
from my_django.contrib.gis.geos.coordseq import GEOSCoordSeq
from my_django.contrib.gis.geos.error import GEOSException
from my_django.contrib.gis.geos.geometry import GEOSGeometry
from my_django.contrib.gis.geos.point import Point
from my_django.contrib.gis.geos import prototypes as capi
class LineString(GEOSGeometry):
_init_func = capi.create_linestring
_minlength = 2
#### Python 'magic' routines ####
def __init__(self, *args, **kwargs):
"""
Initializes on the given sequence -- may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
"""
# If only one argument provided, set the coords array appropriately
if len(args) == 1: coords = args[0]
else: coords = args
if isinstance(coords, (tuple, list)):
# Getting the number of coords and the number of dimensions -- which
# must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
ncoords = len(coords)
if coords: ndim = len(coords[0])
else: raise TypeError('Cannot initialize on empty sequence.')
self._checkdim(ndim)
# Incrementing through each of the coordinates and verifying
for i in xrange(1, ncoords):
if not isinstance(coords[i], (tuple, list, Point)):
raise TypeError('each coordinate should be a sequence (list or tuple)')
if len(coords[i]) != ndim: raise TypeError('Dimension mismatch.')
numpy_coords = False
elif numpy and isinstance(coords, numpy.ndarray):
shape = coords.shape # Using numpy's shape.
if len(shape) != 2: raise TypeError('Too many dimensions.')
self._checkdim(shape[1])
ncoords = shape[0]
ndim = shape[1]
numpy_coords = True
else:
raise TypeError('Invalid initialization input for LineStrings.')
# Creating a coordinate sequence object because it is easier to
# set the points using GEOSCoordSeq.__setitem__().
cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim==3))
for i in xrange(ncoords):
if numpy_coords: cs[i] = coords[i,:]
elif isinstance(coords[i], Point): cs[i] = coords[i].tuple
else: cs[i] = coords[i]
# If SRID was passed in with the keyword arguments
srid = kwargs.get('srid', None)
# Calling the base geometry initialization with the returned pointer
# from the function.
super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid)
def __iter__(self):
"Allows iteration over this LineString."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of points in this LineString."
return len(self._cs)
def _get_single_external(self, index):
return self._cs[index]
_get_single_internal = _get_single_external
def _set_list(self, length, items):
ndim = self._cs.dims #
hasz = self._cs.hasz # I don't understand why these are different
# create a new coordinate sequence and populate accordingly
cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz)
for i, c in enumerate(items):
cs[i] = c
ptr = self._init_func(cs.ptr)
if ptr:
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(self.srid)
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._checkindex(index)
self._cs[index] = value
def _checkdim(self, dim):
if dim not in (2, 3): raise TypeError('Dimension mismatch.')
#### Sequence Properties ####
@property
def tuple(self):
"Returns a tuple version of the geometry from the coordinate sequence."
return self._cs.tuple
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function. Will return a numpy array if possible.
"""
lst = [func(i) for i in xrange(len(self))]
if numpy: return numpy.array(lst) # ARRRR!
else: return lst
@property
def array(self):
"Returns a numpy array for the LineString."
return self._listarr(self._cs.__getitem__)
@property
def merged(self):
"Returns the line merge of this LineString."
return self._topology(capi.geos_linemerge(self.ptr))
@property
def x(self):
"Returns a list or numpy array of the X variable."
return self._listarr(self._cs.getX)
@property
def y(self):
"Returns a list or numpy array of the Y variable."
return self._listarr(self._cs.getY)
@property
def z(self):
"Returns a list or numpy array of the Z variable."
if not self.hasz: return None
else: return self._listarr(self._cs.getZ)
# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
_minLength = 4
_init_func = capi.create_linearring
| [
"bhgv.empire@gmail.com"
] | bhgv.empire@gmail.com |
6aa80f8c9a0e453c816ad6e61647dafa2f89fa01 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/fizz_20200607124830.py | 7edd28cb237df1a2b42f86950f78fd2d8353787b | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | def fizz(num):
newNumber = []
for i in range(1,num+1):
newNumber.append(i)
for j in range(len(newNumber)):
if newNumber[j] % 3== 0:
newNumber[j] = "Fizz"
elif newNumber[j] % 5 == 0:
newNumber[j] = "Buzz"
elif newNumber[j] % 3 == 0 and newNumber[j] % 5 == 0:
newNumber[j] ="StringHeader"
else:
newNumber[j] = newNumber[j]
string = [str(i) for i in newNumber]
print(string)
fizz(8) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
4ede3d6430189f396047e99b23d6ba39496b371d | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2499/60703/279909.py | dd36db8a37d5cb6d0821368c58f16d876f7ab06e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | N = int(input())
Add = []
Query = []
Del = []
for i in range(N):
l = input()
this = l.split(" ")
if(this[0]=="Add"):
# a,b,c = [int(x) for x in this[1:]]
if("" in this):
this.remove("")
a,b,c = int(this[1]),int(this[2]),int(this[3])
# print(this)
Add.append([a,b,c,True])
if(this[0]=="Del"):
de = int(this[1])
Add[de-1][3] = False
if(this[0]=="Query"):
x = int(this[1])
res = 0
for inner in Add:
if(inner[3]==True):
a, b, c = inner[:-1]
if(a*x+b>c):
res+=1
print(res) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
137335f8efa4bf152af0ece0543a2bfab5a63393 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/program/util/ProgramMergeFilter.pyi | c81161bd1644ebe54e013458a0123d7441223c37 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,605 | pyi | from typing import List
import java.lang
class ProgramMergeFilter(object):
"""
The ProgramMergeFilter is used to specify which portions of a
program should be merged into another program.
It indicates the types of program differences to merge.
Each merge type can have its filter set to IGNORE or REPLACE.
IGNORE indicates no interest in replacing or merging that type of difference.
REPLACE indicates to replace differences in program1 with differences of
that type from program2.
Some merge types (for example, COMMENTS and SYMBOLS) allow the filter to be
set to MERGE.
MERGE indicates that the type should
be taken from Program2 and merged into Program1 with whatever is alreaady there.
"""
ALL: int = 131071
BOOKMARKS: int = 2048
BYTES: int = 2
CODE_UNITS: int = 12
COMMENTS: int = 992
DATA: int = 8
EOL_COMMENTS: int = 128
EQUATES: int = 16384
FUNCTIONS: int = 8192
FUNCTION_TAGS: int = 65536
IGNORE: int = 0
INSTRUCTIONS: int = 4
INVALID: int = -1
MERGE: int = 2
PLATE_COMMENTS: int = 32
POST_COMMENTS: int = 512
PRE_COMMENTS: int = 64
PRIMARY_SYMBOL: int = 32768
PROGRAM_CONTEXT: int = 1
PROPERTIES: int = 4096
REFERENCES: int = 16
REPEATABLE_COMMENTS: int = 256
REPLACE: int = 1
SYMBOLS: int = 1024
@overload
def __init__(self):
"""
Creates new ProgramMergeFilter with none of the merge types selected.
"""
...
@overload
def __init__(self, filter: ghidra.program.util.ProgramMergeFilter):
"""
Creates new ProgramMergeFilter that is equal to the specified ProgramMergeFilter.
"""
...
@overload
def __init__(self, type: int, filter: int):
"""
Creates new ProgramMergeFilter with the specified merge types selected.
@param type the type of difference to look for between the programs.
@param filter IGNORE, REPLACE, or MERGE. Indicates
which program difference to include of the specified type.
If a particular type cannot be set to MERGE then it will be set to REPLACE.
"""
...
def equals(self, obj: object) -> bool:
"""
Determines whether or not this filter is equal to the object that
is passed in.
@param obj the object to compare this one with.
@return true if the filter matches this one.
"""
...
@staticmethod
def filterToName(type: int) -> unicode:
"""
<CODE>filterToName</CODE> returns the string associated with an
individual (primary) merge difference setting.
@param type the type of filter.
Valid types are: IGNORE, REPLACE, MERGE.
@return the string indicating the merge difference setting.
"""
...
def getClass(self) -> java.lang.Class: ...
def getFilter(self, type: int) -> int:
"""
getFilter determines whether or not the specified type of filter is set.
Valid types are: BYTES, INSTRUCTIONS, DATA,
SYMBOLS, PRIMARY_SYMBOL, COMMENTS, PROGRAM_CONTEXT, PROPERTIES, BOOKMARKS, FUNCTIONS.
INVALID is returned if combinations of merge types (e.g. ALL) are
passed in.
@param type the merge type.
@return IGNORE, REPLACE, or MERGE. INVALID if parameter is a combination of
types or not a predefined primary type.
"""
...
@staticmethod
def getPrimaryTypes() -> List[int]:
"""
Gets all the valid individual types of differences for this filter.
@return an array containing all the currently defined primary difference
types.
"""
...
def hashCode(self) -> int: ...
def isSet(self) -> bool:
"""
Determines if at least one of the filter types is set to REPLACE or MERGE.
@return true if at least one type is set.
"""
...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def setFilter(self, type: int, filter: int) -> None:
"""
setFilter specifies whether or not the indicated type of item will
not be included by the filter (IGNORE), replaced in the first program using the type of
item in the second program (REPLACE), or included from both programs (MERGE).
Valid types are: BYTES, INSTRUCTIONS, DATA, REFERENCES,
SYMBOLS, PRIMARY_SYMBOL, COMMENTS, PROPERTIES, BOOKMARKS, FUNCTIONS, ALL, or combinations of
these "OR"ed together.
if <CODE>MERGE</CODE> is not valid for an included primary type, then it
will be set to <CODE>REPLACE</CODE> instead for that primary type.
@param type the type(s) of difference(s) to include.
@param filter IGNORE, REPLACE, or MERGE. Indicates whether to include none,
one, or both programs' differences of the specified type.
"""
...
def toString(self) -> unicode:
"""
Returns a printable string indicating the current settings of this filter.
@return the current settings for this filter.
"""
...
@staticmethod
def typeToName(type: int) -> unicode:
"""
<CODE>typeToName()</CODE> returns the name of a predefined merge type.
Only predefined types, as specified in <CODE>ProgramMergeFilter</CODE>,
will return a name. Otherwise, an empty string is returned.
@param type the type of merge difference whose name is wanted.
Valid types are: BYTES, INSTRUCTIONS, DATA, REFERENCES,
SYMBOLS, PRIMARY_SYMBOL, COMMENTS, PROGRAM_CONTEXT, PROPERTIES, BOOKMARKS, FUNCTIONS, ALL.
@return the name of the predefined merge difference type.
Otherwise, the empty string.
"""
...
def validatePredefinedType(self, type: int) -> bool:
"""
validatePredefinedType determines whether or not the indicated type
of filter item is a valid predefined type.
Valid types are: BYTES, INSTRUCTIONS, DATA,
SYMBOLS, PRIMARY_SYMBOL, COMMENTS, PROGRAM_CONTEXT, PROPERTIES, BOOKMARKS, FUNCTIONS, ALL.
@param type the type of difference to look for between the programs.
@return true if this is a pre-defined merge type.
"""
...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def set(self) -> bool: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
f9b4481ce7cba346054da5a19c7cec8344893e57 | d65128e38be0243f279e0d72ef85e7d3c5e116ca | /base/site-packages/emailconfirmation/views.py | e4548f68cf3bb4d499e6cde4de68b8f830c0fcc6 | [
"Apache-2.0"
] | permissive | ZxwZero/fastor | 19bfc568f9a68f1447c2e049428330ade02d451d | dd9e299e250362802032d1984801bed249e36d8d | refs/heads/master | 2021-06-26T06:40:38.555211 | 2021-06-09T02:05:38 | 2021-06-09T02:05:38 | 229,753,500 | 1 | 1 | Apache-2.0 | 2019-12-23T12:59:25 | 2019-12-23T12:59:24 | null | UTF-8 | Python | false | false | 480 | py | from django.shortcuts import render_to_response
from django.template import RequestContext
from emailconfirmation.models import EmailConfirmation
def confirm_email(request, confirmation_key):
    """Look up *confirmation_key* (case-insensitively) and render the
    e-mail confirmation page with whatever address the manager resolves."""
    key = confirmation_key.lower()
    address = EmailConfirmation.objects.confirm_email(key)
    context = {"email_address": address}
    return render_to_response(
        "emailconfirmation/confirm_email.html",
        context,
        context_instance=RequestContext(request),
    )
"edisonlz@163.com"
] | edisonlz@163.com |
207abfd88b3fe639f0c4efd6eaa871947eb0e48f | 80301f1cffc5afce13256e2ecab6323c5df00194 | /cn.fc/py/E0310.py | 681b9e8da36575a89da4e0f3be858238b6f5a6c3 | [] | no_license | ZhenjianYang/SoraVoiceScripts | c1ddf7c1bbcb933243754f9669bd6b75777c87b9 | 94a948090aba0f63b10b2c69dc845dc99c822fc4 | refs/heads/master | 2023-04-18T04:54:44.306652 | 2023-04-06T11:15:17 | 2023-04-06T11:15:17 | 103,167,541 | 43 | 11 | null | 2021-03-06T08:52:54 | 2017-09-11T17:36:55 | Python | UTF-8 | Python | false | false | 1,860 | py | from ED6ScenarioHelper import *
def main():
CreateScenaFile(
FileName = 'E0310 ._SN',
MapName = 'event',
Location = 'E0310.x',
MapIndex = 1,
MapDefaultBGM = "ed60010",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
ScpFunction(
"Function_0_AA", # 00, 0
"Function_1_AB", # 01, 1
)
def Function_0_AA(): pass
label("Function_0_AA")
Return()
# Function_0_AA end
def Function_1_AB(): pass
label("Function_1_AB")
Return()
# Function_1_AB end
SaveToFile()
Try(main)
| [
"zj.yang@qq.com"
] | zj.yang@qq.com |
4d33ba9a57d398620a5cbce24178775f93987cfa | 183cc3bc7b067e30d02321f2f138b241ce619188 | /chess_engine/chess_db/utils.py | c135e1a8cf516b4a4d685f31b1e99eb7d24be524 | [
"Apache-2.0"
] | permissive | yashbonde/chessshhh | 147d880819b404e047035a22e668f0f4e25fb7a6 | 5ee67fc4583d56255bf902a7ad04c1a75822f5bc | refs/heads/master | 2020-11-26T15:44:13.272573 | 2020-01-16T06:35:36 | 2020-01-16T06:35:36 | 229,126,212 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | """this is the complete manager for all things DB
ideally this should be built and maintained using an ORM but
I tried that and it is becoming to much of a deviation and
pushing me away from actual delivery. So move to psycopg as a
quick hack.
"""
import os
import logging
import psycopg2
from psycopg2.extras import RealDictCursor
# custom
# from chess_engine.chess_db.games import create_games_table
# from chess_engine.chess_db.moves import create_moves_table
# from chess_engine.chess_db.users import create_users_table
def connect():
    """Open a PostgreSQL connection configured from POSTGRES_* env vars.

    Returns a (cursor, connection) pair; the cursor yields rows as dicts
    thanks to RealDictCursor.
    """
    conn = psycopg2.connect(
        user=os.environ["POSTGRES_USER"],
        password=os.environ["POSTGRES_PASSWORD"],
        host=os.environ["POSTGRES_HOST"],
        port=os.environ["POSTGRES_PORT"],
        database=os.environ["POSTGRES_DB"],
    )
    cur = conn.cursor(cursor_factory=RealDictCursor)
    return cur, conn
def close_connection(cursor):
    """Release the given database cursor."""
    cursor.close()
def execute(cursor, conn, command, log_=True):
    """Run a SQL command on *cursor* and commit it on *conn*.

    Args:
        cursor: an open database cursor.
        conn: the connection owning *cursor*; committed after execution.
        command: SQL string to execute.
        log_: when True (default), echo the query before running it.
    """
    # BUG FIX: log_ was accepted but ignored — the query was printed
    # unconditionally. It now gates the echo as its name implies.
    if log_:
        print('>> Executing Query: {}'.format(command))
    cursor.execute(command)
    conn.commit()
| [
"bonde.yash97@gmail.com"
] | bonde.yash97@gmail.com |
3ff9f73a57e73ab8f0dfb813b98e9717d1b67e0e | 440c9752c30ea2514c2ccbe45cd0a336c4e111a1 | /ProblemSolving/450DSA/Python/src/dynamicprogramming/LongestCommonSubsequence.py | f9e9e32b3161cc768447c092fb23e3c85db274ae | [] | no_license | Snehal2605/Technical-Interview-Preparation | b6c6e578a42fcac5ac42c8ea7745637575e7ed2b | 0069669790f561bdcedbe5c2900d03ff7d3f218d | refs/heads/master | 2023-08-02T16:59:35.479401 | 2021-08-23T13:40:14 | 2021-08-23T13:40:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | """
@author Anirudh Sharma
Given two sequences, find the length of longest subsequence present in both of them.
Both the strings are of uppercase.
"""
def lcs(s1, s2):
    """Return the length of the longest common subsequence of s1 and s2.

    Returns 0 when either argument is None or empty.
    """
    # Degenerate inputs: nothing to compare.
    if not s1 or not s2:
        return 0

    rows, cols = len(s1), len(s2)
    # table[r][c] = LCS length of s1[:r] and s2[:c]; row/col 0 stay zero.
    table = [[0] * (cols + 1) for _ in range(rows + 1)]

    for r, ch1 in enumerate(s1, start=1):
        for c, ch2 in enumerate(s2, start=1):
            if ch1 == ch2:
                # Matching characters extend the best prefix solution.
                table[r][c] = table[r - 1][c - 1] + 1
            else:
                # Otherwise drop one character from either string.
                table[r][c] = max(table[r - 1][c], table[r][c - 1])

    return table[rows][cols]
if __name__ == "__main__":
    # Demo: prints 3 then 2 for the classic textbook examples.
    for first, second in (("ABCDGH", "AEDFHR"), ("ABC", "AC")):
        print(lcs(first, second))
"anirudh03sharma@gmail.com"
] | anirudh03sharma@gmail.com |
d05fd8d4ae335894e444ded3a511408fcf5451e2 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_020/ch150_2020_04_13_19_54_04_068508.py | 84beed7fa00b8d84d1c859cd2bbe8918b6594b97 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | import math
def calcula_pi(n):
x = 0
for i in range(1,n+1):
x = x + 6/(i**2)
pi = math.sqrt(x)
return pi | [
"you@example.com"
] | you@example.com |
75cb3830e90a4a415a1022620f15c477f7b22e9e | 76e7feaea74beb9d337885dcaa3ee59e26d9db70 | /keras/keras_kfold_manual.py | 5c03e7ca17648887e5a2d5afbf968b3afc8a765f | [] | no_license | sayantansatpati/dlf | 8f9bec134212a6608f2b6854c120253677c71959 | ce8b083f31cd1b4f67ea3718cbbad5cac1eff1f4 | refs/heads/master | 2021-01-11T15:47:02.118653 | 2017-11-14T21:04:19 | 2017-11-14T21:04:19 | 79,931,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | # MLP for Pima Indians Dataset with 10-fold cross validation
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import StratifiedKFold
import numpy
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load pima indians dataset
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
# define 10-fold cross validation test harness
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
cvscores = []
for train, test in kfold.split(X, Y):
# create model
model = Sequential()
model.add(Dense(12, input_dim=8, init='uniform', activation='relu'))
model.add(Dense(8, init='uniform', activation='relu'))
model.add(Dense(1, init='uniform', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
model.fit(X[train], Y[train], nb_epoch=150, batch_size=10, verbose=0)
# evaluate the model
scores = model.evaluate(X[test], Y[test], verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
cvscores.append(scores[1] * 100)
print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores))) | [
"sayantan.satpati.sfbay@gmail.com"
] | sayantan.satpati.sfbay@gmail.com |
997a2d96680cfa0d778ef6ff12aff96093852a74 | 83e0a9cb729892aa19488ab847ad2e4486f96e6a | /app/views.py | d6933bf84b5b627a0d81dbe1b1b40ef8d00723df | [] | no_license | pankajgoyal69/flask | 9b4e792a1b9fbdde358a219acb5540e4592f7d0a | 1575509bc6af03bbdc956da7476e326805f6bd5c | refs/heads/master | 2020-03-28T11:10:54.689794 | 2016-08-24T13:44:44 | 2016-08-24T13:44:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | from app import app
from flask import render_template, flash, redirect
from .forms import LoginForm
@app.route('/')
@app.route('/index')
def index():
    """Render the home page with a stubbed-in user and post list."""
    user = {'nickname': 'Abhi'}  # placeholder until real auth exists
    posts = [
        {'author': {'nickname': 'Shashank'}, 'body': 'We have to work hard'},
        {'author': {'nickname': 'Abhi'}, 'body': 'We have to work really hard'},
    ]
    return render_template('index.html', title='Home', user=user, posts=posts)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Show the sign-in form; on a valid submit, flash the details and go home."""
    form = LoginForm()
    # Guard clause: GET requests and invalid submits just re-render the form.
    if not form.validate_on_submit():
        return render_template('login.html',
                               title='Sign In',
                               form=form)
    flash('Login requested for OpenID="%s", remember_me=%s' %
          (form.openid.data, str(form.remember_me.data)))
    return redirect('/index')
"abhimanyu98986@gmail.com"
] | abhimanyu98986@gmail.com |
88380fea2bb5e9a477ffa8c32b5d553febd17d86 | bc441bb06b8948288f110af63feda4e798f30225 | /notify_sdk/model/container/container_status_pb2.py | e92513e02b1134c5e2557d29431ff472f32e5192 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,860 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: container_status.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from notify_sdk.model.container import container_state_pb2 as notify__sdk_dot_model_dot_container_dot_container__state__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='container_status.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x16\x63ontainer_status.proto\x12\tcontainer\x1a\x30notify_sdk/model/container/container_state.proto\"n\n\x0f\x43ontainerStatus\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0crestartCount\x18\x02 \x01(\x05\x12(\n\x05state\x18\x03 \x01(\x0b\x32\x19.container.ContainerState\x12\r\n\x05image\x18\x04 \x01(\tBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[notify__sdk_dot_model_dot_container_dot_container__state__pb2.DESCRIPTOR,])
_CONTAINERSTATUS = _descriptor.Descriptor(
name='ContainerStatus',
full_name='container.ContainerStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='container.ContainerStatus.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='restartCount', full_name='container.ContainerStatus.restartCount', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='container.ContainerStatus.state', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image', full_name='container.ContainerStatus.image', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=87,
serialized_end=197,
)
_CONTAINERSTATUS.fields_by_name['state'].message_type = notify__sdk_dot_model_dot_container_dot_container__state__pb2._CONTAINERSTATE
DESCRIPTOR.message_types_by_name['ContainerStatus'] = _CONTAINERSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ContainerStatus = _reflection.GeneratedProtocolMessageType('ContainerStatus', (_message.Message,), {
'DESCRIPTOR' : _CONTAINERSTATUS,
'__module__' : 'container_status_pb2'
# @@protoc_insertion_point(class_scope:container.ContainerStatus)
})
_sym_db.RegisterMessage(ContainerStatus)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
a9b5aaf1308ecd2d78cce7be0f00ca0c4b19165b | 0805420ce1890c36aa9e0cc1a782945464433ef6 | /client/eve/client/script/environment/effects/TriageMode.py | 26944f400e3839a82d486d9440c43bf374cd5410 | [] | no_license | cnrat/dec-eve-serenity | 4ebc3b2ab8faa6e6714dbb72b7ebcf92c4b2d75c | 37519e66a5fbb0d7c417d5cf9778636991efbed8 | refs/heads/master | 2021-01-21T03:39:48.969227 | 2016-08-10T05:25:07 | 2016-08-10T05:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\environment\effects\TriageMode.py
from eve.client.script.environment.effects.GenericEffect import STOP_REASON_DEFAULT
from eve.client.script.environment.effects.shipRenderEffect import ShipRenderEffect
class TriageMode(ShipRenderEffect):
    """Render effect that plays the ship's 'siege' animation while active."""
    __guid__ = 'effects.TriageMode'

    def Start(self, duration):
        # Begin the render effect, then put the ship into its siege pose.
        ShipRenderEffect.Start(self, duration)
        self._TriggerShipAnimation('siege')

    def Stop(self, reason=STOP_REASON_DEFAULT):
        # End the render effect and restore the normal animation state.
        ShipRenderEffect.Stop(self, reason)
        self._TriggerShipAnimation('normal')

    def Repeat(self, duration):
        ShipRenderEffect.Repeat(self, duration)

    def _TriggerShipAnimation(self, state):
        # The effect's first ball is the ship this effect is attached to.
        ship = self.fxSequencer.GetBall(self.ballIDs[0])
        ship.TriggerAnimation(state)
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
e39703acf951b97bafcb09db32ceb855cee4ba62 | a0994965785a4def2c194f6bf06a20b7dda3d11a | /test/test_framelist.py | 6bc7ed51a1ce3fd900aaf572583b4a9dab36f7f3 | [
"MIT"
] | permissive | waytai/kaa | 4285df542b9c1f3504f9ec7818c6b18c9892e739 | a9edc8d3db9a2d4b0035d03d8f67f3949559c4d2 | refs/heads/master | 2021-01-18T11:05:06.556564 | 2013-11-17T16:52:32 | 2013-11-17T16:52:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | from contextlib import ExitStack
from unittest.mock import patch
import kaa
from kaa.ui.framelist import framelistmode
import kaa_testutils
class _frame:
def get_title(self):
return 'title'
@patch('kaa.app', new=kaa_testutils.DmyApp(), create=True)
class TestFrameList(kaa_testutils._TestDocBase):
    """Tests for the frame-list popup, run against a dummy kaa.app."""

    def test_framelist(self):
        # Patch the app so the mode sees two stub frames, the first active.
        with ExitStack() as st:
            frames = [_frame(), _frame()]
            st.enter_context(patch('kaa.app.get_frames',
                                   create=True, return_value=frames))
            st.enter_context(patch('kaa.app.get_activeframe',
                                   create=True, return_value=frames[0]))
            # Building the document should succeed without raising.
            doc = framelistmode.FrameListMode.build()
| [
"ishimoto@gembook.org"
] | ishimoto@gembook.org |
c109d04f73c13ce27f78d32f230e129e5a2a4b2b | 4e62248cc376ac9740fe818b2555e127ed9082af | /recv_sekret_packet.py | 8eba1aa559c58ae474dcc928888fc31c50c0b43a | [] | no_license | tigerjibo/sekret_tunnel | 9b86a5d4809c8fd875be770b72a02f1fb783beed | 17145799aa089f64add1441ce58d8432435e3e17 | refs/heads/master | 2021-01-19T22:34:22.642808 | 2015-11-16T19:13:21 | 2015-11-16T19:13:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!/usr/bin/python3.4
# Waits for sekret packet
import socket
UDP_IP = '127.0.0.1'
UDP_PORT = [123, 80]   # candidate ports; only the first is bound below
BUFFER_SIZE = 1024

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((UDP_IP, UDP_PORT[0]))

while True:
    data, addr = sock.recvfrom(BUFFER_SIZE)
    if not data:
        break
    # BUG FIX: 'addr' was previously printed before the loop ever assigned
    # it (NameError), and the tuple was concatenated to a str (TypeError).
    # Report the sender per datagram instead.
    print("Connection address: " + str(addr))
    print("received data: " + str(data) + " from " + str(addr))
    # conn.send(data)  # echo
| [
"admin@example.com"
] | admin@example.com |
b22ef565333696f7f36ae0eb5e95725b5235ddde | 5d2fe1cbbebec530f1500058525d7a92acd966ca | /algorithms/arrays/transition_point.py | 7d35b8569acd5a60c9abc32c832c831ed2645e59 | [] | no_license | bhaveshAn/pyalgos | 2a4ead675485f1abb2af673cd56b422e6d9dbd5c | 2dd9047ae1778981aae9f467fcdb2e922256beeb | refs/heads/master | 2020-03-30T12:52:46.337328 | 2019-03-23T20:48:54 | 2019-03-23T20:48:54 | 151,245,861 | 2 | 0 | null | 2018-10-27T15:17:04 | 2018-10-02T11:48:51 | Python | UTF-8 | Python | false | false | 403 | py | def transition_point(arr, n):
l = 0
r = n - 1
while l <= r:
mid = (l + r) // 2
if arr[mid] == 0:
l = mid + 1
elif arr[mid] == 1:
if arr[mid-1] == 0:
return mid
else:
r = mid - 1
return -1
# Demo: the first 1 appears at index 9 of this 20-element array.
arr = [0] * 9 + [1] * 11
n = len(arr)
print(transition_point(arr, n))
| [
"bhaveshanand7@gmail.com"
] | bhaveshanand7@gmail.com |
845006d08ada53158dff1fc48ddf179170c5db3f | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/sets_20200605191550.py | 27c8896fb841f01769a9e972b7d65281598db8df | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | def Strings(str):
values = {}
for i in str:
if str[i][0] ==
print(i.split(":"))
Strings(["A:1","B:3","C:3","A:4","B:2"])
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
f324c16e7c5c9a81daf3743dd4b08b0cdb50959a | d5e787f85b37f966ccdf0cd5f7a7061eae1c70a8 | /src/transmittals/migrations/0027_auto_20160203_1004.py | 5e368c96ab145e5473d6cf72ea7fa3adec6fa021 | [
"MIT"
] | permissive | iBuilder-Tech/phase | 5ee6cd1fb410de0d067e7b5b8adfea3c4411b62c | cc8f9b9f3e2c31b139d5cce433667c8d5ba2c6f2 | refs/heads/main | 2023-07-10T18:24:20.291136 | 2021-08-31T13:22:17 | 2021-08-31T13:22:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from django.db import migrations, models
class Migration(migrations.Migration):
    """Set the Transmittal model's index_together to the single composite
    entry (originator, recipient, sequential_number, status)."""

    dependencies = [
        ('transmittals', '0026_transmittal_contract_number_new'),
    ]

    operations = [
        migrations.AlterIndexTogether(
            name='transmittal',
            # Replaces any previous index_together set with this 4-column one.
            index_together=set([('originator', 'recipient', 'sequential_number', 'status')]),
        ),
    ]
| [
"lp@providenz.fr"
] | lp@providenz.fr |
374f4daa45235f03092e797cbb4990294b12144e | 48fad811fa8afeda817233fd8bae3a08329fe01e | /src/rf_model.py | 9c1f0b2b8d9f71da043e4cba69b02ed8e7662f7a | [] | no_license | yanshaojie123/JuliaTaxi | c2423818bc3e24fe664e576a290d584ee5ed047c | 18adc70103a52cebf02fe0423c172f5585a12fce | refs/heads/master | 2021-06-15T08:34:11.752027 | 2017-02-16T05:00:37 | 2017-02-16T05:00:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | #-*- coding: utf-8 -*-
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from data_input import get_train, get_test
ftrain, ftest = 'data/train.dat', 'data/test.dat'
X_train, y_train = get_train(ftrain)
print 'Data is ready ...'
#训练集比例
#scale = int(0.8 * len(X))
#X_train, X_test = X[:scale], X[scale:]
#y_train, y_test = y[:scale], y[scale:]
rf = RandomForestRegressor(max_depth=20, verbose=True)
rf.fit(X_train, y_train)
print 'Random forest training complete ...'
X_test = get_test(ftest)
y_test = rf.predict(X_test)
print 'Prediction is ready ...'
#写文件
tripId = np.array(range(1, len(y_test)+1))
y_predict = np.column_stack((tripId, y_test))
np.savetxt('rst.csv', y_predict, header='pathid,time', comments='', fmt='%d,%f')
#error = []
#for i in range(len(y_test)):
# error.append(round(abs(y_test[i]-rst[i]) / y_test[i], 3))
#print 'error:', sum(error) / len(error)
| [
"1139217488@qq.com"
] | 1139217488@qq.com |
3ff6bbeedf9cc51530a695f48270b10787abe679 | 89e4c3dd91ceb3a4a5e74cfaedbb795152ebd1f9 | /lc856_stack.py | 3651a6895d55f13e12003ad9635ac82876e1356a | [] | no_license | Mela2014/lc_punch | a230af2c9d40b1af4932c800e72698de5b77d61a | 498308e6a065af444a1d5570341231e4c51dfa3f | refs/heads/main | 2023-07-13T03:44:56.963033 | 2021-08-25T05:44:40 | 2021-08-25T05:44:40 | 313,742,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | class Solution:
def scoreOfParentheses(self, s: str) -> int:
count_right, rslt = 0, 0
for i, c in enumerate(s):
if c == "(":
count_right += 1
else:
count_right -= 1
if s[i-1] == '(':
rslt += 2**count_right
return rslt
| [
"noreply@github.com"
] | Mela2014.noreply@github.com |
06d3bfc4c26bf02f23ca2ffcda5df1ed007c62e4 | fcde32709c62b8ee86da459bb7c8eee52c848118 | /code/day17/demo02.py | e47e7a610f7609c3d4230b2fba96645f1a4bb045 | [] | no_license | klaus2015/py_base | 6b92d362c3d7dc0e09205a037f4d580381dac94d | ec32c731c1c2f6a0dab87f1d167397e4fa86b8de | refs/heads/master | 2022-07-28T15:49:30.383648 | 2020-05-11T15:31:43 | 2020-05-11T15:31:43 | 261,777,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | """
函数式编程 语法
"""
def fun01():
print("fun01执行喽")
# 调用方法,执行方法体
re1 = fun01()
print(re1) #None
# 将函数赋值给变量
re2 = fun01
# 通过变量,调用函数
re2()
def fun02():
print("fun02执行喽")
# 将函数作为函数的参数进行传递
# 将一个函数的代码(fun02/fun01),注入到另外一个函数中(fun03).
def fun03(func):
print("fun03执行喽")
func()
fun03(fun01)
fun03(fun02)
| [
"598467866@qq.com"
] | 598467866@qq.com |
f25191ba1160170bd5af78a26dc7096bc6da8330 | a1d38a6f514cb41fb59059e0907004489529ee74 | /hw4/code/planarH.py | 0e8a6df9c1ad5a408f479ab1879dffbafd48786c | [] | no_license | jiaqigeng/CMU-16720-Computer-Vision | ed842bdaac0df3a8d52431ae46ec5712e8d7c073 | a386baa6459ac0da9d4daad23dd965291cb72156 | refs/heads/main | 2023-08-18T14:12:17.644729 | 2021-10-15T19:57:24 | 2021-10-15T19:57:24 | 417,595,533 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,662 | py | import numpy as np
import cv2
from BRIEF import briefLite, briefMatch, plotMatches
import matplotlib.pyplot as plt
def computeH(p1, p2):
    """
    Estimate the homography mapping points p2 onto points p1 (DLT).
    INPUTS:
        p1 and p2 - Each are size (2 x N) matrices of corresponding (x, y)'
                 coordinates between two images
    OUTPUTS:
        H2to1 - a 3 x 3 matrix encoding the homography that best matches the
                linear equation, normalized so H2to1[2, 2] == 1
    """
    assert p1.shape[1] == p2.shape[1]
    assert p1.shape[0] == 2
    #############################
    num_pts = p1.shape[1]
    ones = np.ones((num_pts, 1))
    zeros = np.zeros((num_pts, 1))
    u = p2[0, :].reshape(-1, 1)
    v = p2[1, :].reshape(-1, 1)
    x = p1[0, :].reshape(-1, 1)
    y = p1[1, :].reshape(-1, 1)

    # Two DLT constraint rows per correspondence, interleaved into A.
    rows_x = np.hstack((-u, -v, -ones, zeros, zeros, zeros, u * x, v * x, x))
    rows_y = np.hstack((zeros, zeros, zeros, -u, -v, -ones, u * y, v * y, y))
    A = np.zeros((2 * num_pts, 9))
    A[0::2] = rows_x
    A[1::2] = rows_y

    # The solution h minimizes ||Ah||: the eigenvector of A^T A with the
    # smallest eigenvalue (eigh returns eigenvalues in ascending order).
    _, eig_vecs = np.linalg.eigh(np.dot(A.T, A))
    H2to1 = eig_vecs[:, 0].reshape((3, 3))
    return H2to1 / H2to1[2, 2]
def ransacH(matches, locs1, locs2, num_iter=5000, tol=2):
    """
    Returns the best homography by computing the best set of matches using
    RANSAC.
    INPUTS
        locs1 and locs2 - matrices specifying point locations in each of the images
        matches - matrix specifying matches between these two sets of point locations
        num_iter - number of iterations to run RANSAC
        tol - tolerance value for considering a point to be an inlier
    OUTPUTS
        bestH - homography matrix with the most inliers found during RANSAC
    RAISES
        ValueError - if no iteration produced any inliers (e.g. degenerate input)
    """
    ###########################
    p1_all = locs1[matches[:, 0], :2].T
    p2_all = locs2[matches[:, 1], :2].T
    n = p1_all.shape[1]

    # Homogeneous coordinates are loop-invariant; build them once.
    p1_homo = np.vstack((p1_all, np.ones(n)))
    p2_homo = np.vstack((p2_all, np.ones(n)))

    best_num_inliers = 0
    best_inliers_p1, best_inliers_p2 = None, None

    for _ in range(num_iter):
        # BUG FIX: sample without replacement — drawing the same match twice
        # gave a degenerate 4-point set and a useless homography.
        idx = np.random.choice(n, 4, replace=False)
        H = computeH(p1_all[:, idx], p2_all[:, idx])

        # Project every p2 through H and measure the distance to its p1.
        p2_proj = np.dot(H, p2_homo)
        p2_proj[0, :] = p2_proj[0, :] / p2_proj[2, :]
        p2_proj[1, :] = p2_proj[1, :] / p2_proj[2, :]
        dist = np.sqrt((p2_proj[0, :] - p1_homo[0, :]) ** 2 +
                       (p2_proj[1, :] - p1_homo[1, :]) ** 2)

        # BUG FIX: the original counted inliers with `dist <= tol` but then
        # selected them with `dist < tol`; use one consistent predicate.
        inlier_idx = np.where(dist <= tol)[0]
        num_inliers = inlier_idx.shape[0]
        if num_inliers > best_num_inliers:
            best_num_inliers = num_inliers
            best_inliers_p1 = p1_all[:, inlier_idx]
            best_inliers_p2 = p2_all[:, inlier_idx]

    if best_inliers_p1 is None:
        # Previously this crashed inside computeH with an opaque error.
        raise ValueError("RANSAC found no inliers within tolerance")

    bestH = computeH(best_inliers_p1, best_inliers_p2)
    print(best_num_inliers)
    return bestH
def compositeH(H, template, img):
    """
    Returns final warped harry potter image.
    INPUTS
        H - homography
        template - desk image
        img - harry potter image
    OUTPUTS
        final_img - harry potter on book cover image
    """
    height, width = template.shape[0], template.shape[1]
    warped = cv2.warpPerspective(img, H, (width, height))
    # Keep template pixels only where the warped image is empty (zero),
    # then overlay the warped content on top.
    background = template * np.logical_not(warped > 0)
    return background + warped
if __name__ == "__main__":
im1 = cv2.imread("../data/model_chickenbroth.jpg")
im2 = cv2.imread("../data/chickenbroth_01.jpg")
locs1, desc1 = briefLite(im1)
locs2, desc2 = briefLite(im2)
matches = briefMatch(desc1, desc2)
ransacH(matches, locs1, locs2, num_iter=5000, tol=2)
| [
"jiaqig@umich.edu"
] | jiaqig@umich.edu |
fc12fdb31012dfead7791923b2e4968fb7e71626 | 6fd2bc0c49a7dfd5bacb9b83e7e70dbdadd06b5d | /venv/bin/rst2pseudoxml.py | 9a0dacce5ca0e7ea8a512c24fdf89b8c919aa8c9 | [] | no_license | LanLi2017/yw-text-mining | 85f8a1bdc879c9744426f3a7626905b6810a913d | f78dd8ff7e95a4b37fa6f0b1fd9a733df19a9011 | refs/heads/master | 2020-03-07T06:34:55.494792 | 2018-04-20T18:54:27 | 2018-04-20T18:54:27 | 127,326,584 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | #!/Users/barbaralee/Downloads/text_tempo/venv/bin/python
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
# Best-effort locale setup (standard docutils front-end boilerplate).
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
# BUG FIX: narrowed from a bare `except:`, which also swallowed
# SystemExit and KeyboardInterrupt. Locale failures stay non-fatal.
except Exception:
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates pseudo-XML from standalone reStructuredText '
               'sources (for testing purposes). ' + default_description)

# Parse sys.argv and run the reader -> pseudo-XML writer pipeline.
publish_cmdline(description=description)
| [
"lilan.scut@gmail.com"
] | lilan.scut@gmail.com |
e76e2b812cf5dd4c5ba279d9149f795c44f7fb92 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-css/huaweicloudsdkcss/v1/model/update_batch_clusters_tags_request.py | c225a7ac1644dcf5da50dea1638a47ac291ed539 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 5,206 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateBatchClustersTagsRequest:
    """Request model for the CSS "batch add/delete cluster tags" API.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'cluster_id': 'str',
        'resource_type': 'str',
        'body': 'BatchAddOrDeleteTagOnClusterReq'
    }

    attribute_map = {
        'cluster_id': 'cluster_id',
        'resource_type': 'resource_type',
        'body': 'body'
    }

    def __init__(self, cluster_id=None, resource_type=None, body=None):
        """UpdateBatchClustersTagsRequest

        The model defined in huaweicloud sdk

        :param cluster_id: ID of the cluster to which tags are added/removed in batch.
        :type cluster_id: str
        :param resource_type: Resource type; currently fixed to "css-cluster" (cluster type).
        :type resource_type: str
        :param body: Body of the UpdateBatchClustersTagsRequest
        :type body: :class:`huaweicloudsdkcss.v1.BatchAddOrDeleteTagOnClusterReq`
        """

        self._cluster_id = None
        self._resource_type = None
        self._body = None
        self.discriminator = None

        self.cluster_id = cluster_id
        self.resource_type = resource_type
        # body is optional; only assign through the property when provided.
        if body is not None:
            self.body = body

    @property
    def cluster_id(self):
        """Gets the cluster_id of this UpdateBatchClustersTagsRequest.

        ID of the cluster to which tags are added/removed in batch.

        :return: The cluster_id of this UpdateBatchClustersTagsRequest.
        :rtype: str
        """
        return self._cluster_id

    @cluster_id.setter
    def cluster_id(self, cluster_id):
        """Sets the cluster_id of this UpdateBatchClustersTagsRequest.

        ID of the cluster to which tags are added/removed in batch.

        :param cluster_id: The cluster_id of this UpdateBatchClustersTagsRequest.
        :type cluster_id: str
        """
        self._cluster_id = cluster_id

    @property
    def resource_type(self):
        """Gets the resource_type of this UpdateBatchClustersTagsRequest.

        Resource type; currently fixed to "css-cluster" (cluster type).

        :return: The resource_type of this UpdateBatchClustersTagsRequest.
        :rtype: str
        """
        return self._resource_type

    @resource_type.setter
    def resource_type(self, resource_type):
        """Sets the resource_type of this UpdateBatchClustersTagsRequest.

        Resource type; currently fixed to "css-cluster" (cluster type).

        :param resource_type: The resource_type of this UpdateBatchClustersTagsRequest.
        :type resource_type: str
        """
        self._resource_type = resource_type

    @property
    def body(self):
        """Gets the body of this UpdateBatchClustersTagsRequest.

        :return: The body of this UpdateBatchClustersTagsRequest.
        :rtype: :class:`huaweicloudsdkcss.v1.BatchAddOrDeleteTagOnClusterReq`
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this UpdateBatchClustersTagsRequest.

        :param body: The body of this UpdateBatchClustersTagsRequest.
        :type body: :class:`huaweicloudsdkcss.v1.BatchAddOrDeleteTagOnClusterReq`
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes and recursively serialize nested
        # models (anything exposing to_dict), lists and dicts of them.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values declared sensitive instead of emitting them.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Python 2 only: force UTF-8 default encoding before dumping.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdateBatchClustersTagsRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Needed for Python 2 (Python 3 derives != from __eq__ automatically).
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
165adde0d99ddd1735c70d313cf3373cba4f6491 | 16b389c8dcace7f7d010c1fcf57ae0b3f10f88d3 | /docs/jnpr_healthbot_swagger/test/test_tsdb_results_results.py | 52d7c555a3c3706eaec567bd43fc3901e0f88b13 | [
"Apache-2.0"
] | permissive | Juniper/healthbot-py-client | e4e376b074920d745f68f19e9309ede0a4173064 | 0390dc5d194df19c5845b73cb1d6a54441a263bc | refs/heads/master | 2023-08-22T03:48:10.506847 | 2022-02-16T12:21:04 | 2022-02-16T12:21:04 | 210,760,509 | 10 | 5 | Apache-2.0 | 2022-05-25T05:48:55 | 2019-09-25T05:12:35 | Python | UTF-8 | Python | false | false | 947 | py | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 3.1.0
Contact: healthbot-feedback@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.tsdb_results_results import TsdbResultsResults # noqa: E501
from swagger_client.rest import ApiException
class TestTsdbResultsResults(unittest.TestCase):
    """Unit-test stubs for the TsdbResultsResults swagger model."""

    def setUp(self):
        # No fixtures are needed for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testTsdbResultsResults(self):
        """Test TsdbResultsResults"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.tsdb_results_results.TsdbResultsResults()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"nitinkr@juniper.net"
] | nitinkr@juniper.net |
1a673310c1c2d40b57833b7fe2c954217d1475b1 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/hvy_chainer-gan-improvements/chainer-gan-improvements-master/models.py | 73fd98b9a0765c144601f76bfd4813021a003e59 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 3,731 | py | import chainer
from functools import reduce
from chainer import Chain
from chainer import functions as F
from chainer import links as L
def lindim(dims, scale, n):
    """Flattened unit count of a feature map: prod(dim // scale) * n channels."""
    scaled = [dim // scale for dim in dims]
    return reduce(lambda a, b: a * b, scaled) * n
def convdim(dims, scale, n):
    """Shape (channels, height // scale, width // scale) of a downscaled map."""
    height = dims[0] // scale
    width = dims[1] // scale
    return (n, height, width)
class MinibatchDiscrimination(Chain):
    """Minibatch discrimination layer.

    Projects each sample onto n_kernels vectors of dimension kernel_dim and
    appends, per sample, similarity statistics computed against the other
    samples of the same minibatch, so the discriminator can detect batches
    that collapse onto a single mode.
    """

    def __init__(self, in_shape, n_kernels, kernel_dim):
        # t: linear projection from the input features into kernel space.
        super(MinibatchDiscrimination, self).__init__(
            t=L.Linear(in_shape, n_kernels*kernel_dim)
        )
        self.n_kernels = n_kernels
        self.kernel_dim = kernel_dim

    def __call__(self, x):
        # assumes x is a 2-D (batch, features) variable -- TODO confirm.
        minibatch_size = x.shape[0]
        # (batch, n_kernels, kernel_dim): per-sample kernel vectors.
        activation = F.reshape(self.t(x), (-1, self.n_kernels, self.kernel_dim))
        activation_ex = F.expand_dims(activation, 3)
        activation_ex_t = F.expand_dims(F.transpose(activation, (1, 2, 0)), 0)
        activation_ex, activation_ex_t = F.broadcast(activation_ex, activation_ex_t)
        # Pairwise differences between every pair of samples in the batch.
        diff = activation_ex - activation_ex_t

        xp = chainer.cuda.get_array_module(x.data)
        # Identity on the diagonal adds 1 to each sample's self-distance, so
        # the self-similarity contributes exp(-1) instead of exp(0) = 1.
        eps = F.expand_dims(xp.eye(minibatch_size, dtype=xp.float32), 1)
        eps = F.broadcast_to(eps, (minibatch_size, self.n_kernels, minibatch_size))
        # L1 distance over the kernel dimension, per (sample, kernel, sample).
        sum_diff = F.sum(abs(diff), axis=2)
        sum_diff = F.broadcast_to(sum_diff, eps.shape)
        abs_diff = sum_diff + eps
        # Negative-exponential similarity summed over the batch axis.
        minibatch_features = F.sum(F.exp(-abs_diff), 2)
        # Output: original features with the batch statistics concatenated.
        return F.concat((x, minibatch_features), axis=1)
class Generator(Chain):
    """DCGAN-style generator mapping a latent vector to a 1-channel image.

    Four stride-2 deconvolutions upsample by 2**4 overall, so fc0 produces a
    flat vector that is reshaped to (256, H/16, W/16) before deconvolution.
    """

    def __init__(self, n_z, out_shape):
        # n_z: latent dimensionality; out_shape: (H, W) of generated images.
        super(Generator, self).__init__(
            fc0=L.Linear(n_z, lindim(out_shape, 2**4, 256)),
            dc1=L.Deconvolution2D(256, 128, 4, stride=2, pad=1),
            dc2=L.Deconvolution2D(128, 64, 4, stride=2, pad=1),
            dc3=L.Deconvolution2D(64, 32, 4, stride=2, pad=1),
            dc4=L.Deconvolution2D(32, 1, 4, stride=2, pad=1),
            bn0=L.BatchNormalization(lindim(out_shape, 2**4, 256)),
            bn1=L.BatchNormalization(128),
            bn2=L.BatchNormalization(64),
            bn3=L.BatchNormalization(32)
        )
        self.out_shape = out_shape

    def __call__(self, z, test=False):
        """Generate images from latent batch *z*; test=True puts BN in inference mode."""
        h = F.relu(self.bn0(self.fc0(z), test=test))
        # Reshape flat fc0 output to (batch, 256, H/16, W/16).
        h = F.reshape(h, ((z.shape[0],) + convdim(self.out_shape, 2**4, 256)))
        h = F.relu(self.bn1(self.dc1(h), test=test))
        h = F.relu(self.bn2(self.dc2(h), test=test))
        h = F.relu(self.bn3(self.dc3(h), test=test))
        # Sigmoid keeps output pixel values in [0, 1].
        h = F.sigmoid(self.dc4(h))
        return h
class Discriminator(Chain):
    """DCGAN-style discriminator with optional minibatch discrimination.

    Four stride-2 convolutions downsample by 2**4, fc4 maps to 512 features,
    then either the minibatch-discrimination layer (mbd) or the plain linear
    fc5 expands to 512+32 features before the 2-way classification output.
    """

    def __init__(self, in_shape):
        # in_shape: (H, W) of the single-channel input images.
        super(Discriminator, self).__init__(
            c0=L.Convolution2D(1, 32, 4, stride=2, pad=1),
            c1=L.Convolution2D(32, 64, 4, stride=2, pad=1),
            c2=L.Convolution2D(64, 128, 4, stride=2, pad=1),
            c3=L.Convolution2D(128, 256, 4, stride=2, pad=1),
            fc4=L.Linear(lindim(in_shape, 2**4, 256), 512),
            mbd=MinibatchDiscrimination(512, 32, 8),
            fc5=L.Linear(512, 512+32), # Alternative to minibatch discrimination
            fc6=L.Linear(512+32, 2),
            bn1=L.BatchNormalization(64),
            bn2=L.BatchNormalization(128),
            bn3=L.BatchNormalization(256)
        )

    def __call__(self, x, minibatch_discrimination=True, test=False):
        """Return 2-class logits for image batch *x*.

        minibatch_discrimination selects the mbd branch instead of fc5;
        test=True puts batch normalization in inference mode.
        """
        h = F.leaky_relu(self.c0(x))
        h = F.leaky_relu(self.bn1(self.c1(h), test=test))
        h = F.leaky_relu(self.bn2(self.c2(h), test=test))
        h = F.leaky_relu(self.bn3(self.c3(h), test=test))
        h = self.fc4(h)
        # NOTE(review): the mbd branch applies no nonlinearity before fc6,
        # unlike the fc5 branch's leaky_relu -- confirm this asymmetry is intended.
        if minibatch_discrimination:
            h = self.mbd(h)
        else:
            h = F.leaky_relu(self.fc5(h))
        h = self.fc6(h)
        return h
| [
"659338505@qq.com"
] | 659338505@qq.com |
4b3e6f2775b996f08a3037e74a91377b7232036c | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-QuickLookThumbnailing/PyObjCTest/test_qlthumbnailerrors.py | fabedd03a57cadba4bca452cd5f54f460ea054b9 | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | import sys
from PyObjCTools.TestSupport import *
if sys.maxsize > 2 ** 32:
import QuickLookThumbnailing
class TestQLThumbnailErrors(TestCase):
    """Verify the numeric values of the QLThumbnailError* constants."""

    def test_constants(self):
        expected_values = [
            ("QLThumbnailErrorGenerationFailed", 0),
            ("QLThumbnailErrorSavingToURLFailed", 1),
            ("QLThumbnailErrorNoCachedThumbnail", 2),
            ("QLThumbnailErrorNoCloudThumbnail", 3),
            ("QLThumbnailErrorRequestInvalid", 4),
            ("QLThumbnailErrorRequestCancelled", 5),
        ]
        for constant_name, value in expected_values:
            self.assertEqual(getattr(QuickLookThumbnailing, constant_name), value)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
b22642f1f7a159dafd93f1056cddcc1301f09729 | 9da8d60ba0c37a8f5d1f4a7ea8f33f7996b9f1bf | /73.Any_or_All.py | 5b4bbdc0c6c8acec9402064c28b664f058df799d | [] | no_license | save6/HackerRank | b7e764200e813453fe6037512652f1c1df1fdff3 | da7038b586399e599fdd9e96f7c3b599d928f6a7 | refs/heads/master | 2023-08-25T13:36:05.632435 | 2021-10-27T22:19:43 | 2021-10-27T22:19:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
palindromic = {1,2,3,4,5,6,7,8,9,11,22,33,44,55,66,77,88,99}
_ , nums = input(), list(map(int,input().split()))
if all((n > 0) for n in nums) and any([(n in palindromic) for n in nums]):
print("True")
else:
print("False")
| [
"save6green@gmail.com"
] | save6green@gmail.com |
55f466ea1c6faf37175c8c98394f9917363469bc | d4f05d51568bfda9fb964deba92d9fd599a3dcde | /desing_pattern/builder/director.py | 0d23b59834056b62d49a3cc9d6a50b0997c55591 | [] | no_license | Fullmoon8507/PythonPracticeProject | 44beba7ce783e5e22429516d39ee96adc1ead785 | 57454099ad67bfe4431ee997fada640fde6ccecc | refs/heads/master | 2020-04-16T23:29:58.907552 | 2017-05-06T07:27:35 | 2017-05-06T07:27:35 | 53,178,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | class Director():
    def __init__(self, builder):
        """Store the builder instance that construct() will drive."""
        self.__builder = builder
def construct(self):
self.__builder.make_title('Greeting')
self.__builder.make_string('朝から昼にかけて')
string = ['おはようございます。', 'こんにちは。']
self.__builder.make_items(string)
self.__builder.make_string('夜に')
string = ['こんばんは。', 'おやすいなさい。', 'さようなら']
self.__builder.make_items(string)
self.__builder.close()
| [
"you@example.com"
] | you@example.com |
2c8d9db601c659e38f3092280b4a5b4c4df0d0a7 | adb683de2a0b799ad3bf14b1962d757631184d62 | /Data/Analyzer.py | f3db3de7dcc531a7387eaf38d27a62c11df12d82 | [] | no_license | psiddire/ZGamma | f2829c44bb11c36d9621ef5cd7bbdf1d610e7b84 | 0002c848946e910187cd41753ece04d2a136e174 | refs/heads/master | 2023-06-13T05:23:35.575395 | 2021-07-08T21:05:27 | 2021-07-08T21:05:27 | 384,248,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,131 | py | import ROOT
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('input_path')
parser.add_argument('output_path')
args = parser.parse_args()
ROOT.ROOT.EnableImplicitMT()
f = ROOT.TFile(args.input_path)
t = f.Get("tree")
h_ll = ROOT.TH1F("h_ll", "ll_mass", 100, 50, 150)
h_llg = ROOT.TH1F("h_llg", "llg_mass", 80, 100, 180)
for i in range(0, t.GetEntries()):
if i%1000==0:
print i
t.GetEntry(i)
if not t.HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL:
continue
if t.nll < 1 or t.nphoton < 1:
continue
if all(dr < 0.4 for dr in t.photon_drmin):
continue
if t.ll_charge.size()==1 and t.ll_charge[0]!=0:
continue
if sum(map(bool, t.el_id)) < 2:
continue
idll = []
for z in range(t.nll):
x = t.ll_i1[z]
y = t.ll_i2[z]
if (t.el_charge[x]*t.el_charge[y] == -1 and \
t.el_pt[x] > 25 and t.el_dz[x] < 0.01 and t.el_dxy[x] < 0.005 and bool(t.el_id[x]) and abs(t.el_eta[x]) < 2.5 and \
t.el_pt[y] > 15 and t.el_dz[y] < 0.01 and t.el_dxy[y] < 0.005 and bool(t.el_id[y]) and abs(t.el_eta[y]) < 2.5):
idll.append(z)
break
if len(idll) == 0:
continue
massZ = t.ll_m[idll[0]]
idllg = []
iph = 0
ph = ROOT.TLorentzVector()
for z in range(t.nllphoton):
if t.llphoton_ill[z]!=idll[0]:
continue
if (t.photon_drmin[iph] > 0.4 and t.photon_pt[iph] > 15 and \
bool(bool(abs(t.photon_eta[iph]) < 1.4442 and t.photon_idmva[iph] > -0.4) or \
bool(1.566 < abs(t.photon_eta[iph]) < 2.5 and t.photon_idmva[iph] > -0.58))):
ph.SetPtEtaPhiM(t.photon_pt[iph], t.photon_eta[iph], t.photon_phi[iph], 0)
idllg.append(z)
break
iph = iph + 1
if len(idllg) == 0:
continue
massH = t.llphoton_m[idllg[0]]
if (massZ + massH) < 185:
continue
if (ph.E()/massH) < 15/110:
continue
h_ll.Fill(massZ)
h_llg.Fill(massH)
# Persist the histograms to the requested output file.
f1 = ROOT.TFile(args.output_path, "RECREATE")
h_ll.Write()
h_llg.Write()
# BUG FIX: was `f1.Close` (a bare attribute access that never calls the
# method), so the output TFile was never flushed/closed properly.
f1.Close()
| [
"psiddire@nd.edu"
] | psiddire@nd.edu |
12abb6333035c057d8dd69e0b7f68a5b9d5a11bd | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-petadata/aliyunsdkpetadata/request/v20160101/ReleaseInstancePublicConnectionRequest.py | eb09d8caf55b9a6f106d48d7e652a40e9d627e7d | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,370 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ReleaseInstancePublicConnectionRequest(RpcRequest):
    """RPC request releasing the public connection string of a PetaData instance.

    Each getter/setter pair below reads/writes one query parameter of the
    underlying RPC-style request.
    """

    def __init__(self):
        # Product, API version, action name and endpoint resolution code.
        RpcRequest.__init__(self, 'PetaData', '2016-01-01', 'ReleaseInstancePublicConnection','petadata')

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self,ResourceOwnerId):
        self.add_query_param('ResourceOwnerId',ResourceOwnerId)

    def get_SecurityToken(self):
        return self.get_query_params().get('SecurityToken')

    def set_SecurityToken(self,SecurityToken):
        self.add_query_param('SecurityToken',SecurityToken)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self,OwnerAccount):
        self.add_query_param('OwnerAccount',OwnerAccount)

    def get_DBInstanceId(self):
        # ID of the DB instance whose public connection is being released.
        return self.get_query_params().get('DBInstanceId')

    def set_DBInstanceId(self,DBInstanceId):
        self.add_query_param('DBInstanceId',DBInstanceId)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self,OwnerId):
        self.add_query_param('OwnerId',OwnerId)
def get_CurrentConnectionString(self):
return self.get_query_params().get('CurrentConnectionString')
def set_CurrentConnectionString(self,CurrentConnectionString):
self.add_query_param('CurrentConnectionString',CurrentConnectionString) | [
"1478458905@qq.com"
] | 1478458905@qq.com |
59a2ff7c929e6556bf4e0586fa9ae392438dbb8c | 57d907a992eb445526c24930b6441de13d6aae0a | /1584 Min Cost to Connect All points.py | f20049ae01a71716daa9f659e02a9db955c8586a | [] | no_license | sunnyyeti/Leetcode-solutions | 9004505ed3d9d62561df3145e755c01e33d41bb2 | 34a78e06d493e61b21d4442747e9102abf9b319b | refs/heads/master | 2023-06-24T06:29:04.948997 | 2023-06-17T09:43:38 | 2023-06-17T09:43:38 | 166,708,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,970 | py | # You are given an array points representing integer coordinates of some points on a 2D-plane, where points[i] = [xi, yi].
# The cost of connecting two points [xi, yi] and [xj, yj] is the manhattan distance between them: |xi - xj| + |yi - yj|, where |val| denotes the absolute value of val.
# Return the minimum cost to make all points connected. All points are connected if there is exactly one simple path between any two points.
# Example 1:
# Input: points = [[0,0],[2,2],[3,10],[5,2],[7,0]]
# Output: 20
# Explanation:
# We can connect the points as shown above to get the minimum cost of 20.
# Notice that there is a unique path between every pair of points.
# Example 2:
# Input: points = [[3,12],[-2,5],[-4,1]]
# Output: 18
# Example 3:
# Input: points = [[0,0],[1,1],[1,0],[-1,1]]
# Output: 4
# Example 4:
# Input: points = [[-1000000,-1000000],[1000000,1000000]]
# Output: 4000000
# Example 5:
# Input: points = [[0,0]]
# Output: 0
# Constraints:
# 1 <= points.length <= 1000
# -106 <= xi, yi <= 106
# All pairs (xi, yi) are distinct.
import heapq
from typing import List
class Solution:
    def minCostConnectPoints(self, points: List[List[int]]) -> int:
        """Return the minimum total Manhattan cost to connect all points.

        Prim's algorithm with a lazy-deletion min-heap over the implicit
        complete graph: O(n^2 log n) time, O(n) extra space per frontier edge.
        (The original precomputed an O(n^2) distance matrix and relied on a
        ``List`` annotation without importing it, which raised a NameError
        outside the LeetCode harness -- the import is now at file top.)
        """
        n = len(points)
        if n <= 1:
            return 0

        def manhattan(i, j):
            # Edge weight between points i and j.
            return (abs(points[i][0] - points[j][0])
                    + abs(points[i][1] - points[j][1]))

        visited = {0}  # vertices already in the spanning tree
        # Candidate edges leaving the tree: (cost, target vertex).
        frontier = [(manhattan(0, j), j) for j in range(1, n)]
        heapq.heapify(frontier)
        total = 0
        while len(visited) < n:
            cost, vertex = heapq.heappop(frontier)
            if vertex in visited:
                continue  # stale heap entry (lazy deletion)
            visited.add(vertex)
            total += cost
            for other in range(n):
                if other not in visited:
                    heapq.heappush(frontier, (manhattan(vertex, other), other))
        return total
| [
"H.Zhang2@shell.com"
] | H.Zhang2@shell.com |
366986b51dbf6b4df7e18b0dd018d1cac9c4503e | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Mwh3zhKFu332qBhQa_5.py | 46547cd2f35bdf96a2b4f96bca66995203f248ad | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | """
**Mubashir** is trying to figure out the corresponding quadratic formula for
the following quadratic sequence of numbers:
N| Result
---|---
1| 90
2| 240
3| 450
4| 720
5| 1050
If you can figure this out, then help him by creating a function that takes a
number `n` and returns the **nth number of this quadratic sequence**.
### Examples
guess_sequence(1) ➞ 90
guess_sequence(2) ➞ 240
guess_sequence(3) ➞ 450
### Notes
If you are not sure about how to find the formula of a quadratic sequence,
check the Resources.
"""
def guess_sequence(n):
    """Return the nth term of the quadratic sequence 90, 240, 450, 720, 1050, ...

    Successive differences start at 90 and grow by 60, so the nth term is
    the partial sum of 90 + 60*i for i = 0 .. n-1.
    """
    return sum(90 + 60 * step for step in range(n))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
1a78d4bcd594477145a379d323148a9f879b060a | ea42dcad3172c2ea5546a79a74f8aa2edb342e83 | /pytorch_ssd/my_inference.py | c6edc02242f8d9e9006a5a184cef217840280eca | [
"MIT"
] | permissive | WuZifan/Detection_Models | 4781f11c420fa71aa54fa43c1b35494d9d2832ce | 6cfc52618ab084411786907b8272719721d90195 | refs/heads/master | 2021-02-07T22:07:26.175359 | 2020-03-01T07:22:27 | 2020-03-01T07:22:27 | 244,081,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | import torch
from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd, create_mobilenetv1_ssd_predictor
import numpy as np
import cv2
class PytorchSSD():
    """MobileNetV1-SSD wrapper: builds a VOC predictor once, exposes detect()."""

    _defaults = {
        "model_path": './models/mobilenet-v1-ssd-mp-0_675.pth',  # pretrained weights
        "label_path": './models/voc-model-labels.txt',           # one class name per line
        "net_type": 'mb1-ssd'
    }

    def __init__(self):
        self.__dict__.update(self._defaults)  # set up default values
        # Read the class-name list once; using a context manager fixes the
        # leaked file handle from the bare open() in the original.
        with open(self.label_path) as label_file:
            self.class_names = [name.strip() for name in label_file.readlines()]
        self.model = self.get_model()

    def get_model(self):
        """Build and return the SSD predictor from the configured paths.

        BUG FIX: the original re-hard-coded model_path/label_path locally,
        so editing ``_defaults`` (or instance attributes) had no effect;
        the instance configuration is now honoured.
        """
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        net = create_mobilenetv1_ssd(len(self.class_names), is_test=True)
        net.load(self.model_path)
        predictor = create_mobilenetv1_ssd_predictor(net, candidate_size=200, device=device)
        return predictor

    def detect(self, image):
        """Run detection on *image*; returns (boxes, class-name list, probs).

        Accepts an ndarray (assumed BGR) or a PIL image, which is converted
        RGB -> BGR for the OpenCV-based pipeline.
        """
        if not isinstance(image, np.ndarray):
            image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
        boxes, labels, probs = self.model.predict(image, 10, 0.4)
        predict_names = [self.class_names[lb] for lb in labels]
        return boxes, predict_names, probs
| [
"wuzifan0817@gmail.com"
] | wuzifan0817@gmail.com |
d7fab24a70624d86a72508d9becec6155cb68a35 | ef4238779e35114b27d584ae3ef46fab75f90aea | /doctorwho/migrations/0005_auto_20190816_1105.py | 01d36e78e991733f26ed44fdafc47dee9b4afd4b | [
"MIT"
] | permissive | muhindokiro/capstone | 44107aaab11fb4dde512d46b1312bbde34b28dce | 1ef04c5873ef4ef6a960c60ed9af838b8506a289 | refs/heads/master | 2022-11-30T10:42:24.364175 | 2019-08-16T09:02:59 | 2019-08-16T09:02:59 | 202,692,222 | 0 | 0 | null | 2022-11-22T04:11:42 | 2019-08-16T08:44:36 | Python | UTF-8 | Python | false | false | 460 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-08-16 08:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters Complication.symptoms to a
    # CharField with max_length=200.

    dependencies = [
        ('doctorwho', '0004_auto_20190816_1003'),
    ]

    operations = [
        migrations.AlterField(
            model_name='complication',
            name='symptoms',
            field=models.CharField(max_length=200),
        ),
    ]
| [
"blaize1143@gmail.com"
] | blaize1143@gmail.com |
4410c9f0bd13caabd2a00c19a1181232cbb7ae65 | 196f7e3238f961fb5eba7a794f0b0c75d7c30ba1 | /爱心/yourlove文字心.py | fecf088b89748026a12c7dcb96a9cb76a6f5c8e7 | [] | no_license | Liaoyingjie/Pythonlearn | d0b1b95110017af7e063813660e52c61a6333575 | 8bca069f38a60719acac5aa39bd347f90ab0bfb1 | refs/heads/master | 2020-04-08T07:35:07.357487 | 2018-04-12T16:44:43 | 2018-04-12T16:44:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | print('\n'.join([''.join([('yourlove'[(x-y)%8]if((x*0.05)**2+(y*0.1)**2-1)**3-(x*0.05)**2*(y*0.1)**3<=0 else' ')for x in range(-30,30)])for y in range(15,-15,-1)]))
| [
"godzoco@qq.com"
] | godzoco@qq.com |
5cdb2cabbe72a803abfce8fcf63de7e959c7b759 | 7a181ad3cf24ded5c9ad485801fe0cdd280c4367 | /spec/python/test_type_int_unary_op.py | b436cb5b669304b911a6ba61a4bae96a81045c6d | [] | no_license | tarm/kaitai_struct_tests | 6618116ebf972429099dc588bab73b2ac861e46c | c57fc4109fe48d594f06be464c3a41b00887c2db | refs/heads/master | 2021-01-19T09:15:34.447469 | 2017-04-24T03:41:35 | 2017-04-24T03:41:35 | 87,744,149 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | import unittest
from type_int_unary_op import TypeIntUnaryOp
class TestTypeIntUnaryOp(unittest.TestCase):
    """Check signed integer fields and their unary negations in fixed_struct.bin."""

    def test_type_int_unary_op(self):
        expected = {
            "value_s2": 0x4150,
            "value_s8": 0x4150ffff312d4b43,
            "unary_s2": -0x4150,
            "unary_s8": -0x4150ffff312d4b43,
        }
        with TypeIntUnaryOp.from_file("src/fixed_struct.bin") as r:
            for field_name, want in expected.items():
                self.assertEqual(getattr(r, field_name), want)
| [
"greycat@altlinux.org"
] | greycat@altlinux.org |
4e56118748db9fdd467fb20becfbf7aa44f2e19c | ee8c4c954b7c1711899b6d2527bdb12b5c79c9be | /assessment2/amazon/run/core/controllers/belligerent.py | 507a2d533a71abcdf6bff566493c0f11510e3101 | [] | no_license | sqlconsult/byte | 02ac9899aebea4475614969b594bfe2992ffe29a | 548f6cb5038e927b54adca29caf02c981fdcecfc | refs/heads/master | 2021-01-25T14:45:42.120220 | 2018-08-11T23:45:31 | 2018-08-11T23:45:31 | 117,135,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | #!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
# Flask blueprint grouping all routes under the /belligerent URL prefix.
controller = Blueprint('belligerent', __name__, url_prefix='/belligerent')

# Route handler scaffold (currently disabled):
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
#     if title == 'Republic': # TODO 2
#         return render_template('republic.html') # TODO 2
#     else:
#         pass
| [
"sqlconsult@hotmail.com"
] | sqlconsult@hotmail.com |
2a12d1f4fd4a105f28771c32ac995c0c6a768db6 | d93159d0784fc489a5066d3ee592e6c9563b228b | /Configuration/Generator/python/H200ZZ4L_7TeV_cfi.py | 575eec3aaca6b69bf2905e7ce9eee71fa586fca4 | [] | permissive | simonecid/cmssw | 86396e31d41a003a179690f8c322e82e250e33b2 | 2559fdc9545b2c7e337f5113b231025106dd22ab | refs/heads/CAallInOne_81X | 2021-08-15T23:25:02.901905 | 2016-09-13T08:10:20 | 2016-09-13T08:53:42 | 176,462,898 | 0 | 1 | Apache-2.0 | 2019-03-19T08:30:28 | 2019-03-19T08:30:24 | null | UTF-8 | Python | false | false | 3,520 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUESettings_cfi import *
# Pythia6 generator configuration: SM Higgs with PMAS(25,1)=200 GeV produced
# via ggH and VBF (ZZ/WW fusion) at a 7 TeV centre-of-mass energy, with the
# Higgs forced to ZZ and the Z forced to charged-lepton pairs (e/mu/tau).
generator = cms.EDFilter("Pythia6GeneratorFilter",
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaPylistVerbosity = cms.untracked.int32(0),
    filterEfficiency = cms.untracked.double(1.0),
    comEnergy = cms.double(7000.0),
    PythiaParameters = cms.PSet(
        pythiaUESettingsBlock,
        # Raw Pythia6 commands; text after '!' is a Pythia-side comment.
        processParameters = cms.vstring('PMAS(25,1)=200.0 !mass of Higgs',
            'MSEL=0 !(D=1) to select between full user control (0, then use MSUB) and some preprogrammed alternative: QCD hight pT processes (1, then ISUB=11, 12, 13, 28, 53, 68), QCD low pT processes (2, then ISUB=11, 12, 13, 28, 53, 68, 91, 92, 94, 95)',
            # Production subprocesses: gluon fusion plus ZZ/WW vector-boson fusion.
            'MSUB(102)=1 !ggH',
            'MSUB(123)=1 !ZZ fusion to H',
            'MSUB(124)=1 !WW fusion to H',
            'CKIN(45)=5. !high mass cut on m2 in 2 to 2 process Registered by Chris.Seez@cern.ch',
            'CKIN(46)=150. !high mass cut on secondary resonance m1 in 2->1->2 process Registered by Alexandre.Nikitenko@cern.ch',
            'CKIN(47)=5. !low mass cut on secondary resonance m2 in 2->1->2 process Registered by Alexandre.Nikitenko@cern.ch',
            'CKIN(48)=150. !high mass cut on secondary resonance m2 in 2->1->2 process Registered by Alexandre.Nikitenko@cern.ch',
            # Z decay table: only the charged-lepton channels are left open.
            'MDME(174,1)=0 !Z decay into d dbar',
            'MDME(175,1)=0 !Z decay into u ubar',
            'MDME(176,1)=0 !Z decay into s sbar',
            'MDME(177,1)=0 !Z decay into c cbar',
            'MDME(178,1)=0 !Z decay into b bbar',
            'MDME(179,1)=0 !Z decay into t tbar',
            'MDME(182,1)=1 !Z decay into e- e+',
            'MDME(183,1)=0 !Z decay into nu_e nu_ebar',
            'MDME(184,1)=1 !Z decay into mu- mu+',
            'MDME(185,1)=0 !Z decay into nu_mu nu_mubar',
            'MDME(186,1)=1 !Z decay into tau- tau+',
            'MDME(187,1)=0 !Z decay into nu_tau nu_taubar',
            # Higgs decay table: only H -> ZZ is left open.
            'MDME(210,1)=0 !Higgs decay into dd',
            'MDME(211,1)=0 !Higgs decay into uu',
            'MDME(212,1)=0 !Higgs decay into ss',
            'MDME(213,1)=0 !Higgs decay into cc',
            'MDME(214,1)=0 !Higgs decay into bb',
            'MDME(215,1)=0 !Higgs decay into tt',
            'MDME(216,1)=0 !Higgs decay into',
            'MDME(217,1)=0 !Higgs decay into Higgs decay',
            'MDME(218,1)=0 !Higgs decay into e nu e',
            'MDME(219,1)=0 !Higgs decay into mu nu mu',
            'MDME(220,1)=0 !Higgs decay into tau nu tau',
            'MDME(221,1)=0 !Higgs decay into Higgs decay',
            'MDME(222,1)=0 !Higgs decay into g g',
            'MDME(223,1)=0 !Higgs decay into gam gam',
            'MDME(224,1)=0 !Higgs decay into gam Z',
            'MDME(225,1)=1 !Higgs decay into Z Z',
            'MDME(226,1)=0 !Higgs decay into W W'),
        # This is a vector of ParameterSet names to be read, in this order
        parameterSets = cms.vstring('pythiaUESettings',
            'processParameters')
    )
)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
db86b62d9f209e4db7e2f49cceb8d032e67085df | b799ff87e5d50cb1d2318949a3d6d445fd6fe53e | /app_launcher.py | a4423676635f116067c0f0e37c41acdff1906aba | [] | no_license | rcshadman/pyktrader2 | 267235fc99cd941b63af32536c61a3d50923102a | ac24cef56b941c8076d1ee4fd2546ad86952a9d5 | refs/heads/master | 2021-08-30T11:20:48.207539 | 2017-12-17T17:52:20 | 2017-12-17T17:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,238 | py | # -*- coding: utf-8 -*-
import agent
import saveagent
import datetime
import sys
import time
import logging
import mysqlaccess
import misc
import base
import json
from gui_agent import *
def get_option_map(underliers, expiries, strikes):
    """Map (underlier, contract-month, option-type, strike) -> option instrument ID.

    ``expiries`` is accepted for signature compatibility and only paces the
    zip; its values do not appear in the generated IDs.
    """
    opt_map = {}
    for under, _expiry, strike_list in zip(underliers, expiries, strikes):
        under_str = str(under)
        # Contract month encoded as 20YYMM from the last four digits.
        cont_mth = 200000 + int(under_str[-4:])
        # Index options on the IF future trade under the IO product code.
        base_id = under_str.replace('IF', 'IO') if under_str.startswith('IF') else under_str
        for opt_type in ('C', 'P'):
            for strike in strike_list:
                key = (under_str, cont_mth, opt_type, strike)
                opt_map[key] = '%s-%s-%s' % (base_id, opt_type, strike)
    return opt_map
def save(config_file, tday):
    """Run the market-data saving agent for trading day *tday* (``YYYYMMDD``).

    Loads the JSON config, configures logging to ``<name>/<name>.log``,
    subscribes the (optionally filtered) main contracts for the day and runs
    until interrupted with Ctrl-C, which triggers a clean exit.
    """
    with open(config_file, 'r') as infile:
        config = json.load(infile)
    name = config.get('name', 'save_ctp')
    filter_flag = config.get('filter_flag', False)
    base.config_logging(name + "/" + name + ".log", level=logging.DEBUG,
                        format = '%(name)s:%(funcName)s:%(lineno)d:%(asctime)s %(levelname)s %(message)s',
                        to_console = True,
                        console_level = logging.INFO)
    scur_day = datetime.datetime.strptime(tday, '%Y%m%d').date()
    save_agent = saveagent.SaveAgent(config = config, tday = scur_day)
    # Subscribe every main contract (filtered when filter_flag is set).
    curr_insts = misc.filter_main_cont(tday, filter_flag)
    for inst in curr_insts:
        save_agent.add_instrument(inst)
    try:
        save_agent.restart()
        # Keep the process alive; the agent does its work on its own threads.
        while 1:
            time.sleep(1)
    except KeyboardInterrupt:
        save_agent.exit()
def run_gui(config_file, tday):
    """Launch the trading agent with its Tk GUI for trading day *tday* (``YYYYMMDD``)."""
    with open(config_file, 'r') as infile:
        config = json.load(infile)
    name = config.get('name', 'test_agent')
    base.config_logging(name + "/" + name + ".log", level=logging.DEBUG,
                        format = '%(name)s:%(funcName)s:%(lineno)d:%(asctime)s %(levelname)s %(message)s',
                        to_console = True,
                        console_level = logging.INFO)
    scur_day = datetime.datetime.strptime(tday, '%Y%m%d').date()
    myApp = MainApp(scur_day, config, master = None)
    myGui = Gui(myApp)
    # myGui.iconbitmap(r'c:\Python27\DLLs\thumbs-up-emoticon.ico')
    # Blocks until the GUI window is closed.
    myGui.mainloop()
def run(config_file, tday):
    """Run a headless trading agent for trading day *tday* (``YYYYMMDD``).

    The agent class comes from ``config['agent_class']`` as a dotted
    "module.Class" string (default ``agent.Agent``); the process stays alive
    until Ctrl-C, which triggers a clean ``agent.exit()``.
    """
    with open(config_file, 'r') as infile:
        config = json.load(infile)
    name = config.get('name', 'test_agent')
    base.config_logging(name + "/" + name + ".log", level=logging.DEBUG,
                        format = '%(name)s:%(funcName)s:%(lineno)d:%(asctime)s %(levelname)s %(message)s',
                        to_console = True,
                        console_level = logging.INFO)
    scur_day = datetime.datetime.strptime(tday, '%Y%m%d').date()
    agent_class = config.get('agent_class', 'agent.Agent')
    # NOTE(review): assumes a single-level "module.Class" path; a dotted
    # package path would not resolve through __import__ here -- confirm configs.
    cls_str = agent_class.split('.')
    agent_cls = getattr(__import__(str(cls_str[0])), str(cls_str[1]))
    agent = agent_cls(config=config, tday=scur_day)
    try:
        agent.restart()
        while 1:
            time.sleep(1)
    except KeyboardInterrupt:
        agent.exit()
if __name__ == '__main__':
args = sys.argv[1:]
app_name = args[0]
params = (args[1], args[2], )
getattr(sys.modules[__name__], app_name)(*params) | [
"harvey_wwu@hotmail.com"
] | harvey_wwu@hotmail.com |
958967f366a99abf14009b08cdf9df3b3d41c161 | eac24fbc91fcd353c8996346972639f27bc3197c | /hotaru/hotaru_planner_node_blocks/scripts/hotaru_planner_node/algorithm/example_bezier.py | e05588b5f2124d167a8ebddf37261e90f909c08d | [
"BSD-3-Clause"
] | permissive | Forrest-Z/hotaru_planner | 3e05ae864d6dc6f46b5b23b3441a4de4dcbdd149 | 04070d58e72bd9d94c50c15ef3447ffdb40ce383 | refs/heads/master | 2023-01-10T01:43:29.113939 | 2020-10-01T14:22:11 | 2020-10-01T14:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | '''
Created on Sep 16, 2020
@author: kyberszittya
'''
import matplotlib.pyplot as plt
import numpy as np
from bezier import BezierCurve
def main():
    """Build a Bezier curve from five control vertices and plot the result."""
    curve = BezierCurve()
    control_pts = np.array(
        [[0.0, 0.0],
         [6.0, 4.0],
         [7.0, -2.0],
         [10.0, -5.0],
         [9.0, -9.0]]
    )
    curve.add_control_vertices(control_pts)
    curve.initialize_parameter_values()
    # Sample 100 points along the curve and overlay the control polygon.
    path = curve.generate_path(100)
    plt.plot(path[:, 0], path[:, 1])
    plt.plot(control_pts[:, 0], control_pts[:, 1], 'r^')
    plt.show()


if __name__ == "__main__":
    main()
"noreply@github.com"
] | Forrest-Z.noreply@github.com |
9aef765a578d20e22db90be9ad0ab1da0d1bf7ba | ddfca28d1c37815bc83608a62c249a8bb99f982a | /order/migrations/0001_initial.py | 15b0e2ded4875b4f011b10bf5745478d5a0b7fb5 | [] | no_license | suasue/ShockX | 6f26283c07577f4f22d53b7c0ac75b18282cb95e | b61d97e39b9624d23e724ab5215a407292c5d7d3 | refs/heads/main | 2023-04-28T04:57:38.240913 | 2021-05-11T08:28:18 | 2021-05-11T08:28:18 | 362,797,246 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,901 | py | # Generated by Django 3.1.6 on 2021-03-11 14:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the order app: Ask/Bid orders plus lookup tables."""
    initial = True
    dependencies = [
        ('user', '0001_initial'),
        ('product', '0001_initial'),
    ]
    operations = [
        # Sell-side order for a specific product size.
        migrations.CreateModel(
            name='Ask',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('expiration_date', models.DateTimeField(null=True)),
                ('matched_at', models.DateTimeField(null=True)),
                ('total_price', models.DecimalField(decimal_places=2, max_digits=10, null=True)),
                ('order_number', models.CharField(max_length=100, null=True)),
            ],
            options={
                'db_table': 'asks',
            },
        ),
        # Buy-side order; mirrors the Ask model field-for-field.
        migrations.CreateModel(
            name='Bid',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('expiration_date', models.DateTimeField(null=True)),
                ('matched_at', models.DateTimeField(null=True)),
                ('total_price', models.DecimalField(decimal_places=2, max_digits=10, null=True)),
                ('order_number', models.CharField(max_length=100, null=True)),
            ],
            options={
                'db_table': 'bids',
            },
        ),
        # Lookup tables.
        migrations.CreateModel(
            name='ExpirationType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
            ],
            options={
                'db_table': 'expiration_types',
            },
        ),
        migrations.CreateModel(
            name='OrderStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45)),
            ],
            options={
                'db_table': 'order_status',
            },
        ),
        # A matched Ask/Bid pair.
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ask', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='order.ask')),
                ('bid', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='order.bid')),
            ],
            options={
                'db_table': 'orders',
            },
        ),
        # Foreign keys added after model creation to break circular dependencies.
        migrations.AddField(
            model_name='bid',
            name='order_status',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='order.orderstatus'),
        ),
        migrations.AddField(
            model_name='bid',
            name='product_size',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.productsize'),
        ),
        migrations.AddField(
            model_name='bid',
            name='shipping_information',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.shippinginformation'),
        ),
        migrations.AddField(
            model_name='bid',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.user'),
        ),
        migrations.AddField(
            model_name='ask',
            name='order_status',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='order.orderstatus'),
        ),
        migrations.AddField(
            model_name='ask',
            name='product_size',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.productsize'),
        ),
        migrations.AddField(
            model_name='ask',
            name='shipping_information',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.shippinginformation'),
        ),
        migrations.AddField(
            model_name='ask',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.user'),
        ),
    ]
| [
"fergith@naver.com"
] | fergith@naver.com |
22ba8814d7abcf860089661cdda27dad207af8d8 | 8f81de504adaddaf6c45f0c2198a06ac1a796636 | /test/fx2trt/converters/acc_op/test_avgpool.py | 597b909760af97a6f41876d1963b2c94c27ddf10 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | seemethere/pytorch | ac64f1410142eed319cd2c3d0876bb5748916487 | b428096680b804dfd3336e80efb5d9e6661236c1 | refs/heads/master | 2023-03-18T04:19:35.604440 | 2022-01-11T20:01:12 | 2022-01-11T20:01:12 | 227,252,129 | 1 | 0 | NOASSERTION | 2019-12-11T01:47:36 | 2019-12-11T01:47:36 | null | UTF-8 | Python | false | false | 1,506 | py | # Owner(s): ["oncall: fx"]
import torch
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
from parameterized import parameterized, param
from torch.testing._internal.common_fx2trt import AccTestCase
from torch.testing._internal.common_utils import run_tests
class TestAvgPoolConverter(AccTestCase):
    """fx2trt conversion tests for torch.nn.AvgPool2d across its parameters."""
    # NOTE(review): "kernal_size" is a misspelled test-id string; fixing it
    # would rename the generated test case, so it is left as-is here.
    @parameterized.expand(
        [
            ("default", 1),
            ("kernal_size", 3),
            ("stride", 1, 2),
            ("tuple_parameters", 2, (1, 1), (1, 1)),
            param("padding", 2, padding=1),
            param("ceil_mode", 1, ceil_mode=True),
            param("include_pad", 2, padding=1, count_include_pad=False),
        ]
    )
    def test_avg_pool2d(
        self,
        test_name,
        kernel_size,
        stride=1,
        padding=0,
        ceil_mode=False,
        count_include_pad=True,
        divisor_override=None,
    ):
        """Build a one-layer AvgPool2d module and check its TRT conversion."""
        class TestModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.avg_pool = torch.nn.AvgPool2d(
                    kernel_size,
                    stride,
                    padding,
                    ceil_mode,
                    count_include_pad,
                    divisor_override,
                )
            def forward(self, x):
                return self.avg_pool(x)
        # Standard ImageNet-shaped input: (batch, channels, height, width).
        inputs = [torch.randn(1, 3, 224, 224)]
        self.run_test(TestModule(), inputs, expected_ops={acc_ops.avg_pool2d})
if __name__ == '__main__':
    run_tests()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
934998a4ce805894cfd4f9a5c1360d246c6b5376 | cba7444a9b6c7e3f9b557ff81c5ab03a2c8c6d8e | /dftimewolf/lib/preflights/ssh_multiplexer.py | ea898beb4a47d7f410e88e5651102b6ee1590f3e | [
"Apache-2.0"
] | permissive | log2timeline/dftimewolf | e364e0eb213b6a8bb3648598c62fd622cc509755 | bcea85b1ce7a0feb2aa28b5be4fc6ae124e8ca3c | refs/heads/main | 2023-08-24T09:49:25.971929 | 2023-08-17T12:30:07 | 2023-08-17T12:30:07 | 64,484,320 | 248 | 81 | Apache-2.0 | 2023-09-13T07:05:50 | 2016-07-29T13:54:45 | Python | UTF-8 | Python | false | false | 3,077 | py | """Opens an SSH connection to a server using ControlMaster directives."""
import subprocess
import uuid
from typing import Optional, List
from dftimewolf.lib import module
from dftimewolf.lib.modules import manager as modules_manager
from dftimewolf.lib.state import DFTimewolfState
class SSHMultiplexer(module.PreflightModule):
  """Opens a shared (multiplexed) SSH connection via ControlMaster.

  Attributes:
    hostname (str): The hostname we want to multiplex connections to.
    user (str): The username to connect as.
    id_file (str): SSH private key to use.
  """
  def __init__(self,
               state: DFTimewolfState,
               name: Optional[str]=None,
               critical: bool=False) -> None:
    super(SSHMultiplexer, self).__init__(
        state, name=name, critical=critical)
    self.hostname = str()
    self.user = None  # type: Optional[str]
    self.id_file = None  # type: Optional[str]
    self.extra_ssh_options = []  # type: Optional[List[str]]
    # Unique per-run control socket path so concurrent runs don't collide.
    self.control_filename = f"~/.ssh/ctrl-dftw-{str(uuid.uuid4())}"
  def SetUp(self,  # pylint: disable=arguments-differ
            hostname: str,
            user: Optional[str],
            id_file: Optional[str],
            extra_ssh_options: Optional[List[str]]) -> None:
    """Sets up the SSH multiplexer module's attributes.

    Args:
      hostname (str): The hostname we want to multiplex connections to.
      user (str): The username to connect as.
      id_file (str): SSH private key to use.
      extra_ssh_options (List[str]): Extra -o options to be passed on to the
          SSH command.
    """
    self.hostname = hostname
    self.user = user
    self.id_file = id_file
    self.extra_ssh_options = extra_ssh_options
  def Process(self) -> None:
    """Open a shared SSH connection."""
    command = ['ssh', '-q']
    if self.user:
      command.extend(['-l', self.user])
    if self.id_file:
      command.extend(['-i', self.id_file])
    # ControlMaster/ControlPersist keep a master connection alive so later
    # ssh invocations using the same ControlPath reuse it instantly.
    command.extend([
        '-o', 'ControlMaster=auto',
        '-o', 'ControlPersist=yes',
        '-o', f'ControlPath={self.control_filename}',
    ])
    if self.extra_ssh_options:
      command.extend(self.extra_ssh_options)
    command.extend([self.hostname, 'true'])  # execute `true` and return
    self.PublishMessage(
      f'Opening shared SSH connection to: {" ".join(command)}')
    ret = subprocess.call(command)
    if ret != 0:
      self.ModuleError(
          'Unable to SSH to host {0:s}.'.format(self.hostname), critical=True)
    # Downstream modules read the control socket path from the state cache.
    self.state.AddToCache('ssh_control', self.control_filename)
  def CleanUp(self) -> None:
    """Close the shared SSH connection."""
    # `ssh -O exit` asks the master process behind ControlPath to terminate.
    command = ['ssh',
               '-O', 'exit',
               '-o', f'ControlPath={self.control_filename}',
               self.hostname]
    ret = subprocess.call(command)
    if ret != 0:
      self.logger.error('Error cleaning up the shared SSH connection. Remove '
                        'any lingering ~/.ssh/ctrl-dftw-* files.')
    else:
      self.logger.info('Successfully cleaned up SSH connection.')
modules_manager.ModulesManager.RegisterModule(SSHMultiplexer)
| [
"noreply@github.com"
] | log2timeline.noreply@github.com |
eaaf1c8ae6a78e2e68a10a1bb439f2efa670a969 | 071201dd246e451043af40c3a81160e20014a55b | /code/utils/dataset.py | 8fa953699b3bbc521bc013aeed3f2a6e52d83c0a | [] | no_license | XuHangkun/tianchi_channel_1 | 3e3941bb5f6436c52b46b45403ed105c0aa51d1a | 4fb32c7799f060042841a62f146ccfe731b98def | refs/heads/main | 2023-04-24T17:58:10.945395 | 2021-05-11T02:59:22 | 2021-05-11T02:59:22 | 354,222,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,462 | py | # -*- coding: utf-8 -*-
"""
create a dataset from dataFrame
~~~~~~~~~~~~~~~~~~~~~~
:author: Xu Hangkun (许杭锟)
:copyright: © 2020 Xu Hangkun <xuhangkun@ihep.ac.cn>
:license: MIT, see LICENSE for more details.
"""
import pandas as pd
from torch.utils.data import Dataset
import numpy as np
import torch
from utils.EDA import RandomDelete,RandomSwap
class ReportDataset(Dataset):
    """
    Dataset of medical reports with multi-hot labels and optional EDA
    (easy data augmentation: random deletion / random swap of tokens).
    """
    def __init__(self,df,tokenizer,nclass=29,max_len=70,label_smoothing=0,eda_alpha=0.1,n_aug=2,pretrain=False):
        """
        create a dataset from dataFrame, ['id','report','label']
        args:
            -df : dataframe which contain three columns , ['id','report','label']
            -tokenizer : callable turning a report string into a token-id list
            -nclass : number of classes (17 area labels + 12 illness labels)
            -max_len : max length of report, if a report is longer than max_len, just cut it
            -label_smoothing : None or a value between 0 and 1, which means the possibility of wrong label
            -eda_alpha : fraction of tokens changed per augmented sentence
            -n_aug : augmentations per sample (int >= 1) or probability (< 1)
            -pretrain : if True, keep only the augmented samples
        """
        self.max_len = max_len
        self.nclass = nclass
        self.tokenizer = tokenizer
        self.label_smoothing = label_smoothing
        self.eda_alpha = eda_alpha
        self.n_aug = n_aug
        self.pretrain = pretrain
        super(ReportDataset,self).__init__()
        # generate texts
        self.texts = df['report'].values
        self.preprocess_text()
        # generate the labels
        self.labels = df['label'].values
        self.labels_freq = [0 for x in range(self.nclass)]
        self.preprocess_label()
        self.enhanced_texts = []
        self.enhanced_labels = []
        self.easy_data_augmentation()
    def preprocess_text(self):
        """
        convert the text from string to list of token number
        eg:
            "1 2 4" -> [1,2,4]
        """
        texts = []
        for text in self.texts:
            texts.append(self.tokenizer(str(text)))
        self.texts = texts
    def easy_data_augmentation(self):
        """
        Data Enhancement, randomly delete partial words or swap the words
        For evergy sentence, we need to change eda_alpha*sentence_len words.
        """
        # n_aug below 0.1 effectively disables augmentation entirely.
        if self.n_aug == 0 or self.n_aug < 0.1:
            return
        for i in range(len(self.texts)):
            true_aug = 0
            # n_aug > 1: fixed count; 0 <= n_aug <= 1: Bernoulli probability.
            if self.n_aug >1:
                true_aug = int(self.n_aug)
            elif self.n_aug >= 0:
                if np.random.random() < self.n_aug:
                    true_aug = 1
            # NOTE(review): label_all_zero is computed but never used below —
            # presumably intended to skip/keep unlabeled samples; confirm.
            label_all_zero = True
            for j in range(len(self.labels[i])):
                if self.labels[i][j] > 0.5:
                    label_all_zero = False
            for j in range(true_aug):
                # randomly delete some words
                self.enhanced_texts.append(RandomDelete(self.texts[i],self.eda_alpha))
                self.enhanced_labels.append(self.labels[i])
                # randomly swap some words
                self.enhanced_texts.append(RandomSwap(self.texts[i],self.eda_alpha))
                self.enhanced_labels.append(self.labels[i])
        # pretrain keeps only the synthetic samples; otherwise they are appended.
        if self.pretrain:
            self.texts = self.enhanced_texts
            self.labels = self.enhanced_labels
        else:
            self.texts += self.enhanced_texts
            self.labels += self.enhanced_labels
        # randomly break up the data
        # (in-place Fisher-Yates-style shuffling by repeated random swaps,
        # applied identically to texts and labels to keep them aligned)
        for i in range(3*len(self.texts)):
            text_1_index = int(np.random.random()*len(self.texts))
            text_2_index = int(np.random.random()*len(self.texts))
            x = self.texts[text_1_index]
            self.texts[text_1_index] = self.texts[text_2_index]
            self.texts[text_2_index] = x
            x = self.labels[text_1_index]
            self.labels[text_1_index] = self.labels[text_2_index]
            self.labels[text_2_index] = x
    def preprocess_label(self):
        """
        convert the label to multi-hot tensor: [1,2] --> [0,1,1,0,0....]
        Labels are "area indices,illness indices"; illness indices are
        offset by 17 in the multi-hot vector.  Also accumulates per-class
        frequencies into self.labels_freq.
        """
        labels = []
        labels_freq = [0 for x in range(self.nclass)]
        for label in self.labels:
            label_tensor = [0.0 for i in range(self.nclass)]
            label = str(label)
            # Ensure the separator is present so the split below always works.
            if "," not in label:
                label += ","
            label_area,label_ill = label.split(',')
            # label area
            if label_area == '' or label_area == 'nan' or label_area == " ":
                pass
            else:
                label_area = [int(x) for x in label_area.split()]
                for index in label_area:
                    label_tensor[index] = 1.0
                    labels_freq[index] += 1./len(self.labels)
            if label_ill == '' or label_ill == 'nan' or label_area == " ":
                pass
            else:
                label_ill = [int(x) for x in label_ill.split()]
                for index in label_ill:
                    label_tensor[index + 17] = 1.0
                    labels_freq[index + 17] += 1./len(self.labels)
            labels.append(label_tensor)
        self.labels = labels
        self.labels_freq = labels_freq
    def __len__(self):
        return len(self.labels)
    def __getitem__(self,idx):
        """
        return array of report and label
        report,label = [1,2,3....],[0,1]
        When label_smoothing is set, each label bit is flipped with
        probability label_smoothing * class frequency.
        """
        # do label smoothing
        if self.label_smoothing:
            new_label = [label for label in self.labels[idx]]
            for j in range(len(new_label)):
                if np.random.random() < self.label_smoothing*self.labels_freq[j]:
                    new_label[j] = 1 - new_label[j]
            return np.array(self.texts[idx]),np.array(new_label)
        else:
            #if len(self.texts[idx]) > self.max_len:
            #    new_seq = self.texts[idx][:self.max_len//2] + self.texts[idx][len(self.texts[idx])-self.max_len//2:len(self.texts[idx])]
            #    return np.array(self.texts[idx]),np.array(self.labels[idx])
            return np.array(self.texts[idx]),np.array(self.labels[idx])
    def getitem(self,idx):
        # Raw accessor: like __getitem__ but never applies label smoothing.
        return np.array(self.texts[idx]),np.array(self.labels[idx])
def test():
    """Smoke test: load the training CSV and print the first 10 samples."""
    import pandas as pd
    import os
    # Reads $PROJTOP/tcdata/train.csv; '|,|' is the field separator (regex-escaped).
    train_df = pd.read_csv(os.path.join(os.getenv('PROJTOP'),'tcdata/train.csv'),sep="\|,\|",names=["id","report","label"],index_col=0)
    # NOTE(review): ReportDataset.__init__ requires a tokenizer argument; this
    # call passes only the dataframe and will raise TypeError — confirm/update.
    data = ReportDataset(train_df)
    count = 0  # NOTE(review): unused
    for index in range(10):
        text,label = data[index]
        print('text: ',text)
        print('label: ',label)
        print('')
if __name__ == "__main__":
    test()
| [
"xuhangkun@ihep.ac.cn"
] | xuhangkun@ihep.ac.cn |
2371fc8c067be676730d2949fa474a8b56b76748 | 63394b8b324314439ce0c020e1f652d3bdbca794 | /backend/restaurant/migrations/0001_initial.py | 5173565f00d291cb70c4348a12552dd8c00b7a12 | [
"MIT"
] | permissive | TBD-Team/WellCOME-Food | 367f0c0ae1a90fd76433b6fcef82fbaf45f856e5 | 1cebb373ac142f847039eea0091e4e0a87f93eb2 | refs/heads/main | 2023-05-08T12:11:24.993256 | 2021-06-02T23:05:35 | 2021-06-02T23:05:35 | 372,662,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | # Generated by Django 3.2.3 on 2021-06-02 14:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the restaurant app: meals and daily menus."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Meal',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('image_url', models.TextField()),
                ('meal_type', models.PositiveSmallIntegerField(choices=[(1, 'Default'), (2, 'Vegetarian'), (3, 'Vegan')], default=1)),
            ],
        ),
        # One menu per (date, day_period); enforced by unique_together below.
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('day_period', models.PositiveSmallIntegerField(choices=[(1, 'Breakfast'), (2, 'Lunch'), (3, 'Dinner')], default=1)),
                ('meals', models.ManyToManyField(to='restaurant.Meal')),
            ],
            options={
                'unique_together': {('date', 'day_period')},
            },
        ),
    ]
| [
"fvcneto.master98@gmail.com"
] | fvcneto.master98@gmail.com |
def96c91f84677ba838dc3e35922a5248bd3b68e | 0805420ce1890c36aa9e0cc1a782945464433ef6 | /client/notifications/client/bountyNotificationAdapter.py | fdbf24f2e02799c5518837fcb1406cb5f3d0c691 | [] | no_license | cnrat/dec-eve-serenity | 4ebc3b2ab8faa6e6714dbb72b7ebcf92c4b2d75c | 37519e66a5fbb0d7c417d5cf9778636991efbed8 | refs/heads/master | 2021-01-21T03:39:48.969227 | 2016-08-10T05:25:07 | 2016-08-10T05:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\notifications\client\bountyNotificationAdapter.py
class BountyNotificationAdapter(object):
    """Forwards bounty-payout notify events to the logger service."""
    __notifyevents__ = ['OnBountyAddedToPayout']

    def __init__(self, loggerService):
        """Remember the service used to record bounty messages."""
        self.loggerService = loggerService

    def OnBountyAddedToPayout(self, dataDict):
        """Unpack one bounty event payload and log it."""
        self.loggerService.AddBountyMessage(
            dataDict['amount'], dataDict['payoutTime'], dataDict['enemyTypeID'])
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
0b9daec1251e3786028831bbd5b8c15f3508df08 | 616cc6c05f525dd2cb67916601f6ecd2c8242f24 | /example/object_abstraction.py | fc8ee859f02d48dd1c4c7bb1857d72d5127f91d3 | [] | no_license | cookieli/cs61a_li | 6f1d51aad7cd32fb27f64c855b3803bd2f8d9aad | 6ee0df9c64842bde9e30a0484e661abf04212358 | refs/heads/master | 2020-04-07T14:32:38.337554 | 2018-03-07T10:18:03 | 2018-03-07T10:18:03 | 124,218,933 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | def make_adder(n):
def adder(k):
return n + k
return adder
class Adder(object):
    """Callable-object equivalent of ``make_adder``: adds a fixed offset."""

    def __init__(self, n):
        self.n = n

    def __call__(self, k):
        total = self.n + k
        return total
class Dynamo:
    """Demonstrates ``__getattr__``: called only for *missing* attributes."""

    def __getattr__(self, key):
        if key != 'color':
            raise AttributeError
        return 'PapayaWhip'
class SuperDynamo:
    """Demonstrates ``__getattribute__``: intercepts *every* attribute access."""

    def __getattribute__(self, key):
        if key != 'color':
            raise AttributeError
        return 'PapayaWhip'
class Rastan:
    """Demonstrates that ``__getattribute__`` hides even defined methods."""

    def __getattribute__(self, key):
        # Every lookup — including ``swim`` — fails.
        raise AttributeError

    def swim(self):
        pass
| [
"you@example.com"
] | you@example.com |
803c10077368d796211b38b26706aceb3bd6b077 | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res/scripts/common/dossiers2/ui/achievements.py | 6ab27f2e80cc23b2ca067b3a27757d8e7809ba8f | [] | no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 5,472 | py | # 2016.02.14 12:44:29 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/dossiers2/ui/achievements.py
import resource_helper
from debug_utils import LOG_CURRENT_EXCEPTION
# Localization keys for the "battle hero" achievements shown in the client.
BATTLE_HERO_TEXTS = {'warrior': '#achievements:warrior',
 'invader': '#achievements:invader',
 'sniper': '#achievements:sniper',
 'defender': '#achievements:defender',
 'steelwall': '#achievements:steelwall',
 'supporter': '#achievements:supporter',
 'scout': '#achievements:scout',
 'evileye': '#achievements:evileye'}
class ACHIEVEMENT_BLOCK:
    """Dossier block names under which achievements are stored."""
    CLIENT = 'client'
    TOTAL = 'achievements'
    TEAM_7X7 = 'achievements7x7'
    HISTORICAL = 'historicalAchievements'
    UNIQUE = 'uniqueAchievements'
    RARE = 'rareAchievements'
    FORT = 'fortAchievements'
    SINGLE = 'singleAchievements'
    SINGLE_7X7 = 'singleAchievementsRated7x7'
    CLAN = 'clanAchievements'
    RATED_7X7 = 'achievementsRated7x7'
    FALLOUT = 'falloutAchievements'
    ALL = (CLIENT,
     TOTAL,
     TEAM_7X7,
     HISTORICAL,
     UNIQUE,
     RARE,
     FORT,
     SINGLE,
     CLAN,
     RATED_7X7,
     SINGLE_7X7,
     FALLOUT)
class ACHIEVEMENT_MODE:
    """Bit flags for the game modes an achievement applies to."""
    RANDOM = 1
    TEAM_7X7 = 2
    HISTORICAL = 4
    RATED_7X7 = 8
    ALL = RANDOM | TEAM_7X7 | HISTORICAL | RATED_7X7
class ACHIEVEMENT_TYPE:
    """Valid values for the 'type' attribute in the achievements mapping XML."""
    REPEATABLE = 'repeatable'
    CLASS = 'class'
    CUSTOM = 'custom'
    SERIES = 'series'
    SINGLE = 'single'
    ALL = (REPEATABLE,
     CLASS,
     CUSTOM,
     SERIES,
     SINGLE)
class ACHIEVEMENT_SECTION:
    """UI sections an achievement may be displayed under."""
    EPIC = 'epic'
    BATTLE = 'battle'
    SPECIAL = 'special'
    CLASS = 'class'
    ACTION = 'action'
    MEMORIAL = 'memorial'
    GROUP = 'group'
    ALL = (EPIC,
     BATTLE,
     SPECIAL,
     CLASS,
     ACTION,
     MEMORIAL,
     GROUP)
# Short module-private aliases for the enum-like classes above.
_AT, _AS, _AB, _AM = (ACHIEVEMENT_TYPE,
 ACHIEVEMENT_SECTION,
 ACHIEVEMENT_BLOCK,
 ACHIEVEMENT_MODE)
# Weight assigned to achievements whose mapping entry omits 'weight'.
DEFAULT_WEIGHT = -1
def makeAchievesStorageName(block):
    """Build the (block, name) record key that addresses a whole storage block.

    The empty record name selects the block-level storage itself.
    """
    record_name = ''
    return (block, record_name)
# Well-known dossier record keys.
WHITE_TIGER_RECORD = (_AB.CLIENT, 'whiteTiger')
RARE_STORAGE_RECORD = makeAchievesStorageName(_AB.RARE)
MARK_OF_MASTERY_RECORD = (_AB.TOTAL, 'markOfMastery')
MARK_ON_GUN_RECORD = (_AB.TOTAL, 'marksOnGun')
# Maps the XML 'mode' attribute to ACHIEVEMENT_MODE bit flags.
_MODE_CONVERTER = {'random': ACHIEVEMENT_MODE.RANDOM,
 '7x7': ACHIEVEMENT_MODE.TEAM_7X7,
 'historical': ACHIEVEMENT_MODE.HISTORICAL,
 'rated7x7': ACHIEVEMENT_MODE.RATED_7X7,
 'all': ACHIEVEMENT_MODE.ALL}
# Populated by init(): (block, name) -> metadata dict from the mapping XML.
ACHIEVEMENTS = {}
ACHIEVEMENT_SECTIONS_ORDER = (_AS.BATTLE,
 _AS.SPECIAL,
 _AS.EPIC,
 _AS.GROUP,
 _AS.MEMORIAL,
 _AS.CLASS,
 _AS.ACTION)
# section name -> its position in ACHIEVEMENT_SECTIONS_ORDER.
ACHIEVEMENT_SECTIONS_INDICES = dict(((n, i) for i, n in enumerate(ACHIEVEMENT_SECTIONS_ORDER)))
# The following name tuples are filled in by init() from the mapping XML.
BATTLE_ACHIEVES_WITH_RIBBON = []
BATTLE_ACHIEVES_RIGHT = []
FORT_BATTLE_ACHIEVES_RIGHT = []
BATTLE_APPROACHABLE_ACHIEVES = []
def getType(record):
    """Return the achievement type string for a (block, name) record.

    Returns one of ACHIEVEMENT_TYPE.ALL, or None if the record is unknown.
    """
    # Single dict lookup instead of the previous `in` + index double lookup;
    # the redundant `global ACHIEVEMENTS` declaration (read-only access) is
    # dropped, matching the sibling getters.
    entry = ACHIEVEMENTS.get(record)
    if entry is None:
        return None
    return entry['type']
def getSection(record):
    """Return the UI section for a (block, name) record, or None if unknown."""
    # Single dict lookup instead of LBYL `in` + index.
    entry = ACHIEVEMENTS.get(record)
    if entry is None:
        return None
    return entry['section']
def getMode(record):
    """Return the ACHIEVEMENT_MODE bitmask for a record, or None if unknown."""
    # Single dict lookup instead of LBYL `in` + index.
    entry = ACHIEVEMENTS.get(record)
    if entry is None:
        return None
    return entry['mode']
def getWeight(record):
    """Return the weight (float, DEFAULT_WEIGHT if unset in the XML) for a
    record, or None if the record is unknown."""
    # Single dict lookup instead of LBYL `in` + index.
    entry = ACHIEVEMENTS.get(record)
    if entry is None:
        return None
    return entry['weight']
def init(achievesMappingXmlPath):
    """Populate the module-level achievement tables from the mapping XML.

    Fills ACHIEVEMENTS with per-record metadata (type/section/mode/weight,
    with 'mode' converted to an ACHIEVEMENT_MODE bitmask) and the
    BATTLE_* / FORT_* name tuples read from the same file.  Malformed
    entries are logged and skipped.

    Raises:
        AssertionError: if achievesMappingXmlPath is empty/None.
    """
    global BATTLE_APPROACHABLE_ACHIEVES
    global BATTLE_ACHIEVES_WITH_RIBBON
    global BATTLE_ACHIEVES_RIGHT
    global FORT_BATTLE_ACHIEVES_RIGHT
    # Bug fix: the decompiled `raise achievesMappingXmlPath or
    # AssertionError(...)` raised the *path string* (a TypeError) whenever the
    # path was truthy.  Restored to the original assert semantics.
    if not achievesMappingXmlPath:
        raise AssertionError('Invalid achievements mapping file')
    ctx, section = resource_helper.getRoot(achievesMappingXmlPath)
    for ctx, subSection in resource_helper.getIterator(ctx, section['achievements']):
        try:
            item = resource_helper.readItem(ctx, subSection, name='achievement')
            if not item.name:
                continue
            # Entries are written as 'block:name'.
            block, name = tuple(item.name.split(':'))
            if block not in ACHIEVEMENT_BLOCK.ALL:
                raise Exception('Unknown block name', (block, name))
            if 'type' not in item.value or item.value['type'] not in ACHIEVEMENT_TYPE.ALL:
                raise Exception('Unknown achievement type', (block, name), item.value)
            if 'section' not in item.value or item.value['section'] not in ACHIEVEMENT_SECTION.ALL:
                raise Exception('Unknown achievement section', (block, name), item.value)
            if 'mode' not in item.value or item.value['mode'] not in _MODE_CONVERTER:
                raise Exception('Unknown achievement mode', (block, name), item.value)
            value = dict(item.value)
            value['mode'] = _MODE_CONVERTER[item.value['mode']]
            if 'weight' not in value:
                value['weight'] = -1.0
            ACHIEVEMENTS[block, name] = value
        except Exception:
            # Best-effort parsing: log the bad entry and keep going.  Narrowed
            # from a bare `except` so KeyboardInterrupt/SystemExit still escape.
            LOG_CURRENT_EXCEPTION()
    BATTLE_ACHIEVES_WITH_RIBBON = tuple(resource_helper.readList(ctx, section['battleAchievesWithRibbon']).value)
    BATTLE_ACHIEVES_RIGHT = tuple(resource_helper.readList(ctx, section['battleResultsRight']).value)
    FORT_BATTLE_ACHIEVES_RIGHT = tuple(resource_helper.readList(ctx, section['fortBattleResultsRight']).value)
    BATTLE_APPROACHABLE_ACHIEVES = tuple(resource_helper.readList(ctx, section['approachableAchieves']).value)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\dossiers2\ui\achievements.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:44:29 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
4ee247fde665852a08a5f282e9158ec63cb19de9 | ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb | /examples/with_pyspark_emr/setup.py | 84c19c803342474296cc29e1c9670f2c1a1cd5d9 | [
"Apache-2.0"
] | permissive | dagster-io/dagster | 6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a | fe21995e0402878437a828c6a4244025eac8c43b | refs/heads/master | 2023-09-05T20:46:08.203794 | 2023-09-05T19:54:52 | 2023-09-05T19:54:52 | 131,619,646 | 8,565 | 1,154 | Apache-2.0 | 2023-09-14T21:57:37 | 2018-04-30T16:30:04 | Python | UTF-8 | Python | false | false | 307 | py | from setuptools import find_packages, setup
setup(
name="with_pyspark_emr",
packages=find_packages(exclude=["with_pyspark_emr_tests"]),
install_requires=[
"dagster",
"dagster-aws",
"dagster-pyspark",
],
extras_require={"dev": ["dagster-webserver", "pytest"]},
)
| [
"noreply@github.com"
] | dagster-io.noreply@github.com |
3fe0035050582f36f56a31a5a2f1b228970f994f | 9e8b11fb2e905cd7710bcd4ca441d48580e34fe6 | /hypergan/train_hooks/experimental/gradient_locally_stable_train_hook.py | 6ed7ab9fa5cfaf0ea9d11b8947bdcfa087e74a94 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | logbie/HyperGAN | ee6cdafdc17f40e30ce585d8ba46bb30758c7b67 | 07b0f070c05a56b9931638512b06e79e3d936c64 | refs/heads/master | 2022-06-04T12:15:04.350622 | 2020-04-29T17:14:36 | 2020-04-29T17:14:36 | 110,853,824 | 0 | 0 | MIT | 2020-04-29T17:14:37 | 2017-11-15T15:50:32 | Python | UTF-8 | Python | false | false | 1,298 | py | #From https://gist.github.com/EndingCredits/b5f35e84df10d46cfa716178d9c862a3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer
import tensorflow as tf
import hyperchamber as hc
import numpy as np
import inspect
from operator import itemgetter
from hypergan.train_hooks.base_train_hook import BaseTrainHook
class GradientLocallyStableTrainHook(BaseTrainHook):
  """Adds a gradient-norm penalty (squared global norm of d(d_loss)/d(vars))
  scaled by config["lambda"] to the generator loss."""
  def __init__(self, gan=None, config=None, trainer=None, name="GradientLocallyStableTrainHook", memory_size=2, top_k=1):
    # NOTE(review): memory_size and top_k are accepted but never used here.
    super().__init__(config=config, gan=gan, trainer=trainer, name=name)
    d_vars = gan.d_vars()
    g_vars = gan.g_vars()
    d_loss = gan.loss.sample[0]
    # Squared global norm of the discriminator-loss gradients w.r.t. all vars.
    gls = tf.gradients(d_loss, d_vars+g_vars)
    gls = tf.square(tf.global_norm(gls))
    self.g_loss = self.config["lambda"] * gls
    # NOTE(review): `ops` here is tensorflow.python.framework.ops (see file
    # imports), which has no `squash`; this likely should be
    # `self.gan.ops.squash` — confirm against hypergan's ops API.
    self.add_metric('gradient_locally_stable', ops.squash(gls, tf.reduce_mean))
  def losses(self):
    # Discriminator gets no extra loss; generator gets the penalty term.
    return [None, self.g_loss]
  def after_step(self, step, feed_dict):
    pass
  def before_step(self, step, feed_dict):
    pass
| [
"mikkel@255bits.com"
] | mikkel@255bits.com |
a8a298b7cc4eb3a8ceb62ca7b69bcd50feec4a18 | 1559ef4153ccc355e3d4992845ae813217ddac4c | /generate_elb_data_rs.py | 10c57bb7aec16fc2540063ec3e99f30bb4488e56 | [] | no_license | akx/elbowgrease | b61f20566a6a8d5712e2f407eff4b57c87f44796 | 5f87ee8da5f9071d86e73660e3351607607fdf68 | refs/heads/master | 2023-01-27T22:11:57.216908 | 2020-12-07T11:31:54 | 2020-12-07T11:31:54 | 319,106,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | import json
# Via https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html#access-log-entry-syntax
spec = """
proto
time
elb
client:port
target:port
request_processing_time
target_processing_time
response_processing_time
elb_status_code
target_status_code
received_bytes
sent_bytes
"request"
"user_agent"
ssl_cipher
ssl_protocol
target_group_arn
"trace_id"
"domain_name"
"chosen_cert_arn"
matched_rule_priority
request_creation_time
"actions_executed"
"redirect_url"
"error_reason"
"target:port_list"
"target_status_code_list"
"classification"
"classification_reason"
""".strip().splitlines()
regex_bits = []
fields = []
for line in spec:
quoted = line.startswith('"')
line = line.strip('"').replace(":", "_")
if quoted:
regex_bits.append(fr'"(?P<{line}>.+?)"')
else:
regex_bits.append(fr"(?P<{line}>[^\s]+)")
fields.append(line)
regex = "^" + " ".join(regex_bits)
template = f"""
use regex::{{Regex}};
pub const LINE_RE_TEXT: &str = {json.dumps(regex)};
pub const FIELD_NAMES: [&str; {len(fields)}] = {json.dumps(fields)};
lazy_static! {{
pub static ref LINE_RE: Regex = Regex::new(LINE_RE_TEXT).unwrap();
}}
""".strip()
print(template)
| [
"akx@iki.fi"
] | akx@iki.fi |
37603f5987498b4e4092ded98ecf168f10807b25 | 34ddb81af1be500841ba108775b63ff53afbb19e | /test/topology/wiregraph_tests.py | d1f919378ffc61eb7831f9816b183e185b5deabc | [
"MIT"
] | permissive | W3SS/dilapidator | 0f84ce1dc5b70a3b826934d81c08a853bf98c6b3 | 323595364b934ea5d9f979603c0f49da8640d41f | refs/heads/master | 2021-06-21T07:08:55.461605 | 2017-07-04T14:33:23 | 2017-07-04T14:33:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,476 | py | from dilap.geometry.vec3 import vec3
from dilap.geometry.quat import quat
import dilap.geometry.tools as gtl
import dilap.geometry.polymath as pym
import dilap.topology.wiregraph as pgr
import dilap.core.plotting as dtl
import matplotlib.pyplot as plt
import unittest,random,numpy
import pdb
#python3 -m unittest discover -v ./ "*tests.py"
###############################################################################
class test_wiregraph(unittest.TestCase):
    """Unit tests for the wiregraph vertex/edge operations and loop finding.

    Operation shorthand (per the wiregraph API): av = add vertex,
    rv = remove vertex, ae = add edge, re = remove edge, se = split edge,
    mev = make edge+vertex.
    """
    def test_avrvaerese(self):
        """Smoke test: add/remove vertices and edges, split an edge."""
        wg = pgr.wiregraph()
        i1 = wg.av(**{})
        i2 = wg.av(**{})
        i3 = wg.av(**{})
        i4 = wg.av(**{})
        r1 = wg.ae(i1,i2)
        r2 = wg.ae(i2,i3)
        r3 = wg.ae(i3,i4)
        r4 = wg.ae(i4,i1)
        r2 = wg.re(i2,i3)
        i5 = wg.av(**{})
        r5,r6 = wg.se(i1,i4,i5)
        i1 = wg.rv(i1)
    def test_orings(self):
        """Vertex 0's ordered ring lists its three neighbors."""
        wg = pgr.wiregraph()
        i1 = wg.av()
        i2 = wg.av()
        i3 = wg.av()
        i4 = wg.av()
        r1 = wg.ae(i1,i2)
        r2 = wg.ae(i1,i3)
        r3 = wg.ae(i1,i4)
        self.assertEqual(wg.orings[0],[1,2,3])
    def test_mev(self):
        """Smoke test: chained make-edge-vertex operations."""
        wg = pgr.wiregraph()
        i1 = wg.av(**{})
        i2,r1 = wg.mev(i1,{},{})
        i3,r2 = wg.mev(i2,{},{})
    def test_loop(self):
        """Check cw/ccw loop traversal on a rectangle with a crossbar."""
        def pl(il):
            # Debug helper: plot the loop (disabled below).
            ilp = [rg.vs[j][1]['p'] for j in il]
            ilp = pym.contract(ilp,2.0)
            ax = rg.plot()
            ax = dtl.plot_polygon(ilp,ax,col = 'b',lw = 4)
            plt.show()
        rg = pgr.wiregraph()
        #import dilap.topology.planargraph as pgr
        #rg = pgr.planargraph()
        i1 = rg.av(p = vec3( 10,-5,0),l = 0)
        i2 = rg.av(p = vec3( 10, 5,0),l = 0)
        i3 = rg.av(p = vec3(-10, 5,0),l = 0)
        i4 = rg.av(p = vec3(-10,-5,0),l = 0)
        r1 = rg.ae(i1,i2)
        r2 = rg.ae(i2,i3)
        r3 = rg.ae(i3,i4)
        r4 = rg.ae(i4,i1)
        i5 = rg.av(p = vec3(2,-10,0),l = 0)
        r5,r6 = rg.se(i1,i4,i5)
        i6 = rg.av(p = vec3(-2,10,0),l = 0)
        r7,r8 = rg.se(i2,i3,i6)
        r9 = rg.ae(i5,i6)
        il = rg.loop(i5,i6,'cw')
        self.assertEqual(il,[i5,i6,i2,i1])
        #pl(il)
        il = rg.loop(i5,i6,'ccw')
        self.assertEqual(il,[i5,i6,i3,i4])
        #pl(il)
        il = rg.loop(i5,i1,'cw')
        self.assertEqual(il,[i5,i1,i2,i6,i3,i4])
        #pl(il)
        il = rg.loop(i5,i1,'ccw')
        self.assertEqual(il,[i5,i1,i2,i6])
        #pl(il)
        il = rg.loop(i1,i5,'cw')
        self.assertEqual(il,[i1,i5,i6,i2])
        #pl(il)
        # A dangling vertex changes the loops that pass through i1.
        i7,r10 = rg.mev(i1,{'p':vec3(12,-20,0),'l':0},{})
        il = rg.loop(i5,i6,'cw')
        self.assertEqual(il,[i5,i6,i2,i1])
        #pl(il)
        il = rg.loop(i5,i6,'ccw')
        self.assertEqual(il,[i5,i6,i3,i4])
        #pl(il)
        il = rg.loop(i7,i1,'cw')
        self.assertEqual(il,[i7,i1,i2,i6,i3,i4,i5,i1])
        #pl(il)
        il = rg.loop(i7,i1,'ccw')
        self.assertEqual(il,[i7,i1,i5,i4,i3,i6,i2,i1])
        #pl(il)
        i8,r11 = rg.mev(i3,{'p':vec3(-5,0,0),'l':0},{})
        il = rg.loop(i3,i4,'ccw')
        self.assertEqual(il,[i3,i4,i5,i6,i3,i8])
        #pl(il)
        il = rg.loop(i3,i4,'cw')
        self.assertEqual(il,[i3,i4,i5,i1,i7,i1,i2,i6])
        #pl(il)
    def test_uloops(self):
        """Unique-loop count stays 3 even after adding a dangling edge."""
        def pl():
            # Debug helper: plot all found loops (disabled below).
            ax = rg.plot()
            for lp in loops:
                lpps = [rg.vs[j][1]['p'] for j in lp]
                lpps = pym.contract(lpps,2)
                ax = dtl.plot_polygon(lpps,ax,lw = 3,col = 'b')
            plt.show()
        rg = pgr.wiregraph()
        i1 = rg.av(p = vec3( 10,-5,0),l = 0)
        i2 = rg.av(p = vec3( 10, 5,0),l = 0)
        i3 = rg.av(p = vec3(-10, 5,0),l = 0)
        i4 = rg.av(p = vec3(-10,-5,0),l = 0)
        r1 = rg.ae(i1,i2)
        r2 = rg.ae(i2,i3)
        r3 = rg.ae(i3,i4)
        r4 = rg.ae(i4,i1)
        i5 = rg.av(p = vec3(2,-10,0),l = 0)
        r5,r6 = rg.se(i1,i4,i5)
        i6 = rg.av(p = vec3(-2,10,0),l = 0)
        r7,r8 = rg.se(i2,i3,i6)
        r9 = rg.ae(i5,i6)
        loops = rg.uloops('ccw')
        #pl()
        self.assertEqual(len(loops),3)
        i7,r10 = rg.mev(i1,{'p':vec3(12,-20,0),'l':0},{})
        loops = rg.uloops('ccw')
        #pl()
        self.assertEqual(len(loops),3)
###############################################################################
if __name__ == '__main__':unittest.main()
###############################################################################
| [
"cogle@vt.edu"
] | cogle@vt.edu |
3de94ab45ce96c31ce827ed35df6f9186b372e52 | e44c1ac44a3cc912fbeaa0152b9294a03fd893ea | /pyTuplingUtils/utils.py | 44600c53f18682a3197cb28573f6ca84c0fd6a6c | [
"BSD-2-Clause"
] | permissive | umd-lhcb/pyTuplingUtils | ca03db1975f7f283caab1436ac1c5d85fad75d2a | 85f3ca90f01389f834af6de1044364843210c4c5 | refs/heads/master | 2023-03-10T00:12:40.922444 | 2023-03-03T23:31:09 | 2023-03-03T23:31:09 | 215,201,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | #!/usr/bin/env python3
#
# Author: Yipeng Sun
# License: BSD 2-clause
# Last Change: Wed Mar 30, 2022 at 12:47 PM -0400
import numpy as np
from .io import read_branches
# Find total number of events (unique events) out of total number of candidates.
def extract_uid(ntp, tree, run_branch='runNumber', event_branch='eventNumber',
conditional=None, run_array=None, event_array=None):
if run_array is None or event_array is None:
run, event = read_branches(ntp, tree, (run_branch, event_branch))
else:
run, event = run_array, event_array
if conditional is not None:
run = run[conditional]
event = event[conditional]
run = np.char.mod('%d', run)
event = np.char.mod('%d', event)
run = np.char.add(run, '-')
ids = np.char.add(run, event)
uid, idx, count = np.unique(ids, return_index=True, return_counts=True)
num_of_evt = ids.size
num_of_ids = uid.size
num_of_dupl_ids = uid[count > 1].size
# num_of_evt_w_dupl_id = np.sum(count[count > 1]) - num_of_dupl_ids
num_of_evt_w_dupl_id = num_of_evt - num_of_ids
return uid, idx, num_of_evt, num_of_ids, \
num_of_dupl_ids, num_of_evt_w_dupl_id
def find_common_uid(ntp1, ntp2, tree1, tree2, **kwargs):
uid1, idx1 = extract_uid(ntp1, tree1, **kwargs)[0:2]
uid2, idx2 = extract_uid(ntp2, tree2, **kwargs)[0:2]
uid_comm, uid_comm_idx1, uid_comm_idx2 = np.intersect1d(
uid1, uid2, assume_unique=True, return_indices=True)
return uid_comm, idx1[uid_comm_idx1], idx2[uid_comm_idx2]
def gen_histo(array, bins=200, scale=1.05, data_range=None, **kwargs):
if data_range is None:
data_min = array.min()
data_max = array.max()
data_min = data_min*scale if data_min < 0 else data_min/scale
data_max = data_max/scale if data_max < 0 else data_max*scale
return np.histogram(array, bins, (data_min, data_max), **kwargs)
return np.histogram(array, bins, data_range, **kwargs)
def gen_histo_stacked_baseline(histos):
result = [np.zeros(histos[0].size)]
for idx in range(0, len(histos)-1):
result.append(result[idx]+histos[idx])
return result
| [
"syp@umd.edu"
] | syp@umd.edu |
7c6e227a6218bb07e37801575684936eefa3ad62 | 14c032f1ce2685e8113da0cd29b5dd5bfb9cc478 | /src/tests/test_dict.py | cb90a674a085a2ce833c58e4f83c55f987556b33 | [
"MIT"
] | permissive | richardARPANET/persistent-dict | 6ec8459e968ae1e1dba32e5e8f10905eb621f434 | bee97fb8d725dd3af047bf76e5f2eda2eea695bb | refs/heads/master | 2021-05-26T07:47:40.541753 | 2020-10-13T13:34:44 | 2020-10-13T13:34:44 | 127,956,135 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,189 | py | import uuid
from hypothesis import given
from hypothesis import strategies as st
import pytest
import fakeredis
from persistentdict import RedisDict
DICT_CONTEXT_STRAT = (st.booleans() | st.datetimes() | st.text()
| st.integers() | st.dates() | st.times()
| st.timedeltas() | st.uuids() | st.characters())
@pytest.fixture
def redis():
redis_ = fakeredis.FakeStrictRedis()
yield redis_
redis_.flushall()
@pytest.fixture
def key():
return str(uuid.uuid4())
@pytest.fixture
def redisdict(redis, key):
return RedisDict(persistence=redis, key=key)
@given(key=DICT_CONTEXT_STRAT, val=DICT_CONTEXT_STRAT)
def test_persist_different_types_of_data(redisdict, key, val):
redisdict.clear()
redisdict[key] = val
try:
assert redisdict[key] == val
except AssertionError:
if (isinstance(val, complex) and isinstance(redisdict[key], complex)):
# nanj cannot be compared
assert str(redisdict[key]) == str(val)
else:
raise
assert list(redisdict.keys()) == [key]
assert list(redisdict.values()) == [val]
assert len(redisdict) == 1
def test_append(redisdict):
redisdict['key'] = []
redisdict['key'].append(1)
redisdict['key'].append(2)
redisdict['key'].append(3)
assert len(redisdict['key']) == 3
assert redisdict == {'key': [1, 2, 3]}
def test_keys(redisdict):
redisdict['some_key'] = 'something'
assert list(redisdict.keys()) == ['some_key']
another_key = 'another_key'
redisdict[another_key] = 'something 2'
assert sorted(list(redisdict.keys())) == [another_key, 'some_key']
del redisdict['some_key']
assert list(redisdict.keys()) == [another_key]
def test_clear(redisdict):
some_key = str(uuid.uuid4())
redisdict[some_key] = {'a': 'b'}
redisdict.clear()
assert list(redisdict.keys()) == []
with pytest.raises(KeyError):
redisdict[some_key]
with pytest.raises(KeyError):
redisdict[some_key]['a']
def test_has_key(redisdict):
some_key = str(uuid.uuid4())
redisdict[some_key] = 'something'
assert redisdict.has_key(some_key) is True # noqa
assert redisdict.has_key('unknown') is False # noqa
def test_contains(redisdict):
some_key = str(uuid.uuid4())
redisdict[some_key] = 'something'
assert (some_key in redisdict) is True
assert ('unknown' in redisdict) is False
def test_cache_in_sync_when_update_operations_performed(redisdict):
redisdict[1] = {'stuff': {}}
assert redisdict._cache == redisdict
assert redisdict == {1: {'stuff': {}}}
assert redisdict._cache == redisdict
redisdict[1]['stuff'] = {'a': 'b'}
assert redisdict._cache == {1: {'stuff': {'a': 'b'}}}
assert redisdict == {1: {'stuff': {'a': 'b'}}}
assert redisdict._cache == redisdict
assert redisdict._cache == {1: {'stuff': {'a': 'b'}}}
assert redisdict == {1: {'stuff': {'a': 'b'}}}
assert redisdict[1]['stuff'] == {'a': 'b'}
assert redisdict[1] == {'stuff': {'a': 'b'}}
assert redisdict._cache == redisdict
def test_dict_operations(redisdict, redis, key):
# exercise setitem
redisdict['10'] = 'ten'
redisdict['20'] = 'twenty'
redisdict['30'] = 'thirty'
assert len(redisdict) == 3
assert RedisDict(persistence=redis, key=key) == redisdict
# exercise delitem
del redisdict['20']
assert RedisDict(persistence=redis, key=key) == redisdict
# check getitem and setitem
assert redisdict['10'] == 'ten'
assert RedisDict(persistence=redis, key=key) == redisdict
# check keys() and delitem
assert sorted(list(redisdict.keys())) == sorted(['10', '30'])
assert RedisDict(persistence=redis, key=key) == redisdict
# has_key
assert redisdict.has_key('10') # noqa
assert not redisdict.has_key('20') # noqa
# __contains__
assert '10' in redisdict
assert '20' not in redisdict
# __iter__
assert sorted([k for k in redisdict]) == ['10', '30']
# __len__
assert len(redisdict) == 2
# items
assert sorted(list(redisdict.items())) == [('10', 'ten'), ('30', 'thirty')]
# keys
assert sorted(list(redisdict.keys())) == ['10', '30']
# values
assert sorted(list(redisdict.values())) == ['ten', 'thirty']
# get
assert redisdict.get('10') == 'ten'
assert redisdict.get('15', 'fifteen') == 'fifteen'
assert redisdict.get('15') is None
# setdefault
assert redisdict.setdefault('40', 'forty') == 'forty'
assert redisdict.setdefault('10', 'null') == 'ten'
del redisdict['40']
assert RedisDict(persistence=redis, key=key) == redisdict
# pop
assert redisdict.pop('10') == 'ten'
assert 10 not in redisdict
redisdict['10'] = 'ten'
assert redisdict.pop('x', 1) == 1
redisdict['x'] = 42
assert redisdict.pop('x', 1) == 42
assert RedisDict(persistence=redis, key=key) == redisdict
# popitem
k, v = redisdict.popitem()
assert k not in redisdict
redisdict[k] = v
# clear
redisdict.clear()
assert len(redisdict._cache) == 0
assert len(redisdict) == 0
# empty popitem
with pytest.raises(KeyError):
redisdict.popitem()
# update
redisdict.update({'10': 'ten', '20': 'twenty'})
assert redisdict['10'] == 'ten'
assert redisdict['20'] == 'twenty'
# cmp
normal_dict = {'10': 'ten', '20': 'twenty'}
assert normal_dict == {'10': 'ten', '20': 'twenty'}
redis.flushall()
redisdict2 = RedisDict(persistence=redis, key=key)
redisdict2['20'] = 'twenty'
redisdict2['10'] = 'ten'
assert normal_dict == redisdict2
def test_when_data_modified_in_another_instance_both_have_same_state(
redisdict, key, redis
):
redisdict2 = RedisDict(persistence=redis, key=key)
assert redisdict == redisdict2
redisdict2['a'] = 1
assert redisdict['a'] == redisdict2['a']
assert redisdict == redisdict2
redisdict['b'] = 2
assert redisdict['b'] == redisdict2['b']
assert redisdict == redisdict2
redisdict2.update({'c': 3})
assert redisdict['c'] == redisdict2['c']
assert redisdict == redisdict2
| [
"richard@richard.do"
] | richard@richard.do |
ad1efcfb5f395a11eb7029cf069c4a89781326ce | 1ee90596d52554cb4ef51883c79093897f5279a0 | /Sisteme/Systeme Update/Ingame Bank/Pack/locale/xx/ui/inventorywindow.py | 503f1cbd806475fd66a6942ee66fb43bcc0a540c | [] | no_license | Reizonr1/metin2-adv | bf7ecb26352b13641cd69b982a48a6b20061979a | 5c2c096015ef3971a2f1121b54e33358d973c694 | refs/heads/master | 2022-04-05T20:50:38.176241 | 2020-03-03T18:20:58 | 2020-03-03T18:20:58 | 233,462,795 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,134 | py | Search:
## 600 - (width + ¿À¸¥ÂÊÀ¸·Î ºÎÅÍ ¶ç¿ì±â 24 px)
"x" : SCREEN_WIDTH - 176,
"y" : SCREEN_HEIGHT - 37 - 565,
"style" : ("movable", "float",),
"width" : 176,
"height" : 565,
Change "height" value with +20:
## 600 - (width + ¿À¸¥ÂÊÀ¸·Î ºÎÅÍ ¶ç¿ì±â 24 px)
"x" : SCREEN_WIDTH - 176,
"y" : SCREEN_HEIGHT - 37 - 565,
"style" : ("movable", "float",),
"width" : 176,
"height" : 585,
---
Search:
"name" : "board",
"type" : "board",
"style" : ("attach",),
"x" : 0,
"y" : 0,
"width" : 176,
"height" : 565,
Change "height" value with +20:
"name" : "board",
"type" : "board",
"style" : ("attach",),
"x" : 0,
"y" : 0,
"width" : 176,
"height" : 585,
---
Search:
{
"name":"Money_Slot",
"type":"button",
"x":8,
"y":28,
"horizontal_align":"center",
"vertical_align":"bottom",
"default_image" : "d:/ymir work/ui/public/parameter_slot_05.sub",
"over_image" : "d:/ymir work/ui/public/parameter_slot_05.sub",
"down_image" : "d:/ymir work/ui/public/parameter_slot_05.sub",
"children" :
(
{
"name":"Money_Icon",
"type":"image",
"x":-18,
"y":2,
"image":"d:/ymir work/ui/game/windows/money_icon.sub",
},
{
"name" : "Money",
"type" : "text",
"x" : 3,
"y" : 3,
"horizontal_align" : "right",
"text_horizontal_align" : "right",
"text" : "123456789",
},
),
},
Add it under:
{
"name" : "Bar_First_Icon",
"type" : "button",
"x":7,
"y":47,
"vertical_align":"bottom",
"default_image" : "d:/ymir work/ui/bar_1.tga",
"over_image" : "d:/ymir work/ui/bar_1.tga",
"down_image" : "d:/ymir work/ui/bar_1.tga",
},
{
"name":"Bar_First_Slot",
"type":"button",
"x":22,
"y":48,
"vertical_align":"bottom",
"default_image" : "d:/ymir work/ui/public/parameter_slot_00.sub",
"over_image" : "d:/ymir work/ui/public/parameter_slot_00.sub",
"down_image" : "d:/ymir work/ui/public/parameter_slot_00.sub",
"children" :
(
{
"name" : "BarFirstText",
"type" : "text",
"x" : 3,
"y" : 3,
"horizontal_align" : "right",
"text_horizontal_align" : "right",
"text" : "Unknown",
},
),
},
{
"name" : "Bar_Second_Icon",
"type" : "button",
"x":61,
"y":47,
"vertical_align":"bottom",
"default_image" : "d:/ymir work/ui/bar_2.tga",
"over_image" : "d:/ymir work/ui/bar_2.tga",
"down_image" : "d:/ymir work/ui/bar_2.tga",
},
{
"name":"Bar_Second_Slot",
"type":"button",
"x":76,
"y":48,
"vertical_align":"bottom",
"default_image" : "d:/ymir work/ui/public/parameter_slot_00.sub",
"over_image" : "d:/ymir work/ui/public/parameter_slot_00.sub",
"down_image" : "d:/ymir work/ui/public/parameter_slot_00.sub",
"children" :
(
{
"name" : "BarSecondText",
"type" : "text",
"x" : 3,
"y" : 3,
"horizontal_align" : "right",
"text_horizontal_align" : "right",
"text" : "Unknown",
},
),
},
{
"name" : "Bar_Third_Icon",
"type" : "button",
"x":115,
"y":47,
"vertical_align":"bottom",
"default_image" : "d:/ymir work/ui/bar_3.tga",
"over_image" : "d:/ymir work/ui/bar_3.tga",
"down_image" : "d:/ymir work/ui/bar_3.tga",
},
{
"name":"Bar_Third_Slot",
"type":"button",
"x":130,
"y":48,
"vertical_align":"bottom",
"default_image" : "d:/ymir work/ui/public/parameter_slot_00.sub",
"over_image" : "d:/ymir work/ui/public/parameter_slot_00.sub",
"down_image" : "d:/ymir work/ui/public/parameter_slot_00.sub",
"children" :
(
{
"name" : "BarThirdText",
"type" : "text",
"x" : 3,
"y" : 3,
"horizontal_align" : "right",
"text_horizontal_align" : "right",
"text" : "Unknown",
},
),
},
| [
"59807064+Reizonr1@users.noreply.github.com"
] | 59807064+Reizonr1@users.noreply.github.com |
cc7339f1168ab2c72331647479e4a67e095c53d9 | d8e69e950d7f84b674868c801e0170f61a95aec7 | /ytmdl/utility.py | 1f7fc5f7a8e0bc870d1c4b673b2a19a0a3696323 | [
"MIT"
] | permissive | nanthu0123/ytmdl | ab50727cb1910632ef50c8b130e7f6be8f70e432 | 18ade43a450f964c7407649ac43229c978ee0b68 | refs/heads/master | 2022-04-15T00:26:58.437712 | 2020-04-12T11:13:06 | 2020-04-12T11:13:06 | 256,260,597 | 1 | 0 | MIT | 2020-04-16T15:49:41 | 2020-04-16T15:49:40 | null | UTF-8 | Python | false | false | 2,399 | py | """Some definitions to interact with the command line."""
import subprocess
from os import remove, path, popen
from ytmdl import defaults
from shutil import which
import ffmpeg
def exe(command):
"""Execute the command externally.
Written by Nishan Pantha.
"""
command = command.strip()
c = command.split()
output, error = subprocess.Popen(c,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
output = output.decode('utf-8').strip()
error = error.decode('utf-8').strip()
return (output, error)
def get_terminal_length():
"""Return the length of the terminal."""
rows, cols = popen('stty size', 'r').read().split()
return int(cols)
def convert_to_mp3r(path):
"""Convert the file to mp3 using ffmpeg."""
try:
new_name = path + '_new.mp3'
command = "ffmpeg -loglevel panic -i {} -vn -ar 44100 -ac 2 -ab {}k -f mp3 {}".format(path,
defaults.DEFAULT.SONG_QUALITY,
new_name)
output, error = exe(command)
# Delete the temp file now
remove(path)
return new_name
except Exception as e:
return e
def convert_to_mp3(path):
"""Covert to mp3 using the python ffmpeg module."""
new_name = path + '_new.mp3'
ffmpeg.input(path).output(
new_name,
loglevel='panic',
ar=44100,
ac=2,
ab='{}k'.format(defaults.DEFAULT.SONG_QUALITY),
f='mp3'
).run()
# Delete the temp file now
remove(path)
return new_name
def is_valid(dir_path):
"""Check if passed path is valid or not."""
if not path.isfile(dir_path):
return False
else:
return True
def get_songs(file_path):
"""Extract the songs from the provided list."""
if is_valid(file_path):
RSTREAM = open(file_path, 'r')
song_tup = RSTREAM.read().split("\n")
return song_tup
else:
return []
def is_present(app):
"""Check if the passed app is installed in the machine."""
return which(app) is not None | [
"deep.barman30@gmail.com"
] | deep.barman30@gmail.com |
bc969545fc35472150297ae552914b4964c464ba | 73c45163acf0b50f0a59cee471a36ff9576afee2 | /microblog.py | 0be63a20ea6497ccd7ef6899b3cbdfda9b987508 | [] | no_license | erfaenda/FlaskP | 7b8ec1413daba5a8f1c38eff2aec1767a6214365 | d56a47861a9e4b78d3af4ec58055eaddc046dcd1 | refs/heads/master | 2020-07-08T04:22:51.671076 | 2019-08-23T08:00:36 | 2019-08-23T08:00:36 | 203,563,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | from app import app, db
from app.models import User, Posts
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'User': User, 'Post': Posts} | [
"regalko7@gmail.com"
] | regalko7@gmail.com |
74f9e4ff4f9389660517eae375f5391ee2f4d8a7 | 7c0166f59f7c3f7e21daed42013e002a895a3404 | /demo1.py | b7634e6668794a081776c08326817c16fff375dd | [] | no_license | ESIPFed/corpy_example | 1daad044bb74566ae9717e328406379e618564c1 | 96905283c7c3aed4adc8d90e0fa9ef80979e0f9c | refs/heads/master | 2020-03-23T13:52:34.860593 | 2019-01-31T22:29:10 | 2019-01-31T22:29:10 | 141,642,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | #!/usr/bin/env python
# Very basic example of retrieving existing information
# from COR (registered ontologies, organizations, terms).
if __name__ == "__main__":
from pprint import pprint
import swagger_client
configuration = swagger_client.Configuration()
# Note: no authenticated operations in this simple demo.
api_client = swagger_client.ApiClient(configuration)
# ontologies
ont_api = swagger_client.OntologyApi(api_client)
print('\n\nOntologies' + '=' * 50 + '\n')
pprint(ont_api.ont_get())
iri = 'http://sweetontology.net/realmBiolBiome'
format = 'ttl'
print('\n\nOntology by iri=%s format=%s' % (iri, format) + '=' * 50 + '\n')
response = ont_api.ont_get(iri=iri, format=format)
pprint(response)
# organizations
org_api = swagger_client.OrganizationApi(api_client)
print('\n\nOrganizations' + '=' * 50 + '\n')
print('Organizations:')
pprint(org_api.org_get())
# terms
term_api = swagger_client.TermApi(api_client)
containing = 'temperature'
print('\n\nTerms containing "%s" in SPO' % containing + '=' * 50 + '\n')
pprint(term_api.term_get(containing=containing, _in='spo'))
| [
"carueda@mbari.org"
] | carueda@mbari.org |
bafd09593a3383c22ece5bd857b4b9b0976707f3 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/util/bin/format/macho/dyld/DyldCacheImageTextInfo.pyi | c7b63a33c2f9069b14ac71eeed812c265fd50396 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | pyi | import ghidra.app.util.bin
import ghidra.program.model.data
import java.lang
class DyldCacheImageTextInfo(object, ghidra.app.util.bin.StructConverter):
"""
Represents a dyld_cache_image_text_info structure.
"""
ASCII: ghidra.program.model.data.DataType = char
BYTE: ghidra.program.model.data.DataType = byte
DWORD: ghidra.program.model.data.DataType = dword
IBO32: ghidra.program.model.data.DataType = ImageBaseOffset32
POINTER: ghidra.program.model.data.DataType = pointer
QWORD: ghidra.program.model.data.DataType = qword
STRING: ghidra.program.model.data.DataType = string
UTF16: ghidra.program.model.data.DataType = unicode
UTF8: ghidra.program.model.data.DataType = string-utf8
VOID: ghidra.program.model.data.DataType = void
WORD: ghidra.program.model.data.DataType = word
def __init__(self, reader: ghidra.app.util.bin.BinaryReader):
"""
Create a new {@link DyldCacheImageTextInfo}.
@param reader A {@link BinaryReader} positioned at the start of a DYLD image text info
@throws IOException if there was an IO-related problem creating the DYLD image text info
"""
...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getPath(self) -> unicode:
"""
Gets the path of the image text.
@return The path of the image text.
"""
...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toDataType(self) -> ghidra.program.model.data.DataType: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def path(self) -> unicode: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
b06120ede323cf3d63cbea50fdcdb63797cd167d | 70a960186af21ae6f7a8fcd4d13fde54892f1821 | /odds_and_ends/get_SUT_IP_through_iLO.py | 649ed33cf0f78f6d6bfaf0bda5097ee34ebfb0c4 | [] | no_license | apua/altair_mat | d6f31bacae62d4490d561c2f9ec07a745693e15e | 37dd580fd011aaae9ca52f99bb13757bab2df325 | refs/heads/master | 2021-04-29T05:49:01.561238 | 2015-08-21T09:40:50 | 2015-08-21T09:40:50 | 78,004,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | import hpilo
ilo_ip = vSphere_iLO = '16.153.114.143'
ilo = hpilo.Ilo(ilo_ip)
sut_ip = ilo.get_embedded_health()['nic_information']['NIC Port 1']['ip_address']
print(sut_ip)
| [
"apua.juan@hp.com"
] | apua.juan@hp.com |
b177160705e3f927a90758852169afc90dbdb4a7 | 40da64b0b2369d4e7b2fb4404aee9d8b55f3875c | /polls/migrations/0001_initial.py | 8433947ce9de5a4a6b923fb5f0c29291d1db7b47 | [] | no_license | alex1the1great/Polls-App | 1ae2ee26b4ad865412c0f1064a1bd3510ddfe896 | 0797c76bca5aa3918ec5bcf5f198eb16c8172a47 | refs/heads/master | 2022-07-18T03:38:45.026786 | 2020-05-18T13:17:01 | 2020-05-18T13:17:01 | 264,945,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | # Generated by Django 3.0.6 on 2020-05-14 06:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question')),
],
),
]
| [
"asimshrestha608@gmail.com"
] | asimshrestha608@gmail.com |
140c8029b5804163703fcd1d266071485bbe6407 | 51f7752df6a6e2b4dcee7ea585bacf7b9cb5ea14 | /304. Range Sum Query 2D - Immutable.py | 9c8d75ecccaed855f25909050f78310ff6aa9017 | [
"MIT"
] | permissive | ten2net/Leetcode-solution | a9ba7235987c0fdd1860d88ae461a4ea1fb979e4 | 97e84daa2926a9cd2036e0dee36dfe5773114b15 | refs/heads/master | 2021-01-21T20:29:42.570931 | 2016-12-06T10:29:18 | 2016-12-06T10:29:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,592 | py | class NumMatrix(object):
def __init__(self, matrix):
"""
initialize your data structure here.
:type matrix: List[List[int]]
"""
self.dp = [range(len(matrix[0])) for i in range(len(matrix))]
for i in range(len(matrix)):
for j in range(len(matrix[i])):
self.dp[i][j] = matrix[i][j]
if i > 0:
self.dp[i][j] += self.dp[i-1][j]
if j > 0:
self.dp[i][j] += self.dp[i][j-1]
if i>0 and j>0:
self.dp[i][j] -= self.dp[i-1][j-1]
def sumRegion(self, row1, col1, row2, col2):
"""
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
ret = self.dp[row2][col2]
if row1>0:
ret -= self.dp[row1-1][col2]
if col1>0:
ret -= self.dp[row2][col1-1]
if row1>0 and col1>0:
ret += self.dp[row1-1][col1-1]
return ret
# Your NumMatrix object will be instantiated and called as such:
# numMatrix = NumMatrix([ [3, 0, 1, 4, 2],
# [5, 6, 3, 2, 1],
# [1, 2, 0, 1, 5],
# [4, 1, 0, 1, 7],
# [1, 0, 3, 0, 5]])
numMatrix = NumMatrix([[-2]])
print numMatrix.sumRegion(0,0,0,0)
# print numMatrix.sumRegion(0, 1, 2, 3)
# print numMatrix.sumRegion(1, 2, 3, 4)
# print numMatrix.sumRegion(2, 1, 4, 3) #-> 8
# print numMatrix.sumRegion(1, 1, 2, 2) #-> 11
# print numMatrix.sumRegion(1, 2, 2, 4) #-> 12 | [
"982899917@qq.com"
] | 982899917@qq.com |
1018dd569e6b808afb8f20a75118d791ed4e2b11 | afdcb60d13b7516930bc361c7ffa1c4bc4b3fddf | /QANet/lib/data/evaluation/semseg_eval.py | 4f628d40d3c538e91684843180d9db5bdafd71fa | [
"MIT"
] | permissive | sll-ll/parsing-rcc | 7012c8f4b09c86b554b0c020d4a46329966a8e92 | 7a9a67120b706f0d386d3638f84e402ebdb05fc4 | refs/heads/main | 2023-05-24T02:07:33.892667 | 2021-06-07T13:20:15 | 2021-06-07T13:20:15 | 374,361,437 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,497 | py | import cv2
import numpy as np
import os
from tqdm import tqdm
import torch
from lib.data.structures.semantic_segmentation import convert_pano_to_semseg, convert_poly_to_semseg
from lib.utils.misc import logging_rank
class SemSegEvaluator:
"""
Evaluate semantic segmentation
"""
def __init__(self, dataset, root_dir, pre_dir, num_classes, gt_dir=None):
"""
Initialize SemSegEvaluator
:return: None
"""
self.pre_dir = pre_dir
self.dataset = dataset
self.num_classes = num_classes
self.extra_fields = dataset.extra_fields
self.ids = dataset.ids
if gt_dir is not None:
self.gt_dir = gt_dir
else:
self.gt_dir = self.extra_fields['seg_root'] if 'seg_root' in self.extra_fields \
else root_dir.replace('img', 'seg')
self.ignore_label = self.extra_fields['ignore_label'] if 'ignore_label' in self.extra_fields else 255
self.label_shift = self.extra_fields['label_shift'] if 'label_shift' in self.extra_fields else 0
self.name_trans = self.extra_fields['name_trans'] if 'name_trans' in self.extra_fields else ['jpg', 'png']
self.stats = dict()
def fast_hist(self, a, b):
k = (a >= 0) & (a < self.num_classes)
return np.bincount(
self.num_classes * a[k].astype(int) + b[k], minlength=self.num_classes ** 2
).reshape(self.num_classes, self.num_classes)
def generate_gt_png(self, i, image_name, size):
if 'pano_anns' not in self.extra_fields:
if self.extra_fields['semseg_format'] == "mask":
gt_png = cv2.imread(os.path.join(self.gt_dir, image_name), 0) + self.label_shift
else:
assert self.extra_fields['semseg_format'] == "poly"
anno = self.dataset.coco.loadAnns(self.dataset.coco.getAnnIds(i))
classes = [obj["category_id"] for obj in anno]
classes = [self.dataset.json_category_id_to_contiguous_id[c] for c in classes]
classes = torch.tensor(classes)
extra_seg = self.dataset.extra_seg
extra_semsegs = extra_seg.loadAnns(extra_seg.getAnnIds(i)) if extra_seg else None
semsegs_anno = [[obj["segmentation"] for obj in anno], extra_semsegs]
gt = convert_poly_to_semseg((size[1], size[0]), semsegs_anno, classes, 1, self.extra_fields)
gt_png = gt.numpy()
else:
image_path = os.path.join(self.gt_dir, image_name)
gt = convert_pano_to_semseg(image_path, self.extra_fields, image_name)
gt_png = gt.numpy()
return gt_png
def evaluate(self):
logging_rank('Evaluating Semantic Segmentation predictions')
hist = np.zeros((self.num_classes, self.num_classes))
for i in tqdm(self.ids, desc='Calculating IoU ..'):
image_name = self.dataset.coco.imgs[i]['file_name'].replace(self.name_trans[0], self.name_trans[1])
if not (os.path.exists(os.path.join(self.gt_dir, image_name)) and
os.path.exists(os.path.join(self.pre_dir, image_name))):
continue
pre_png = cv2.imread(os.path.join(self.pre_dir, image_name), 0)
gt_png = self.generate_gt_png(i, image_name, pre_png.shape)
assert gt_png.shape == pre_png.shape, '{} VS {}'.format(str(gt_png.shape), str(pre_png.shape))
gt = gt_png.flatten()
pre = pre_png.flatten()
hist += self.fast_hist(gt, pre)
def mean_iou(overall_h):
iu = np.diag(overall_h) / (overall_h.sum(1) + overall_h.sum(0) - np.diag(overall_h) + 1e-10)
return iu, np.nanmean(iu)
def per_class_acc(overall_h):
acc = np.diag(overall_h) / (overall_h.sum(1) + 1e-10)
return np.nanmean(acc)
def pixel_wise_acc(overall_h):
return np.diag(overall_h).sum() / overall_h.sum()
iou, miou = mean_iou(hist)
mean_acc = per_class_acc(hist)
pixel_acc = pixel_wise_acc(hist)
self.stats.update(dict(IoU=iou, mIoU=miou, MeanACC=mean_acc, PixelACC=pixel_acc))
def accumulate(self, p=None):
pass
def summarize(self):
iStr = ' {:<18} @[area={:>6s}] = {:0.4f}'
for k, v in self.stats.items():
if k == 'IoU':
continue
logging_rank(iStr.format(k, 'all', v))
def __str__(self):
self.summarize()
def semseg_png(score, dataset=None, img_info=None, output_folder=None, semseg=None, target=None):
semseg_pres_dir = os.path.join(output_folder, 'semseg_pres')
if not os.path.exists(semseg_pres_dir):
os.makedirs(semseg_pres_dir)
im_name = img_info['file_name']
extra_fields = dataset.extra_fields
name_trans = extra_fields['name_trans'] if 'name_trans' in extra_fields else ['jpg', 'png']
save_semseg_pres = os.path.join(semseg_pres_dir, im_name.replace(name_trans[0], name_trans[1]))
cv2.imwrite(save_semseg_pres, score.astype(np.uint8))
if target is not None:
semseg_gt_dir = os.path.join(output_folder, 'semseg_gt')
label = target.get_field("semsegs").semseg.squeeze(0).numpy()
if not os.path.exists(semseg_gt_dir):
os.makedirs(semseg_gt_dir)
save_semseg_gt = os.path.join(semseg_gt_dir, im_name.replace(name_trans[0], name_trans[1]))
cv2.imwrite(save_semseg_gt, label.astype(np.uint8))
| [
"you@example.com"
] | you@example.com |
1c9524af0279b17c8fac0068bc39504389be7d41 | 0e1245a588be591e7a5752cbe23774c172929f81 | /22.py | 7fbf4915b2f6e62772a1fb90669e79d68676a2b8 | [] | no_license | Phantom1911/leetcode | 9e41c82f712c596dc58589afb198acedd9351e6b | b9789aa7f7d5b99ff41f2791a292a0d0b57af67f | refs/heads/master | 2022-07-10T22:00:01.424841 | 2022-06-08T09:00:32 | 2022-06-08T09:00:32 | 207,652,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | class Solution:
def generateParenthesis(self, n: int) -> List[str]:
# valid string : you must open a parantheses before closing it
arr = []
generate(n, n, "", arr)
return arr
def generate(remopen, remclose, currstr, arr):
if remopen == 0 and remclose == 0:
arr.append(currstr)
return
if remopen > 0:
generate(remopen - 1, remclose, currstr + '(', arr)
if remclose > remopen:
generate(remopen, remclose - 1, currstr + ')', arr) | [
"aastik.koshta@flipkart.com"
] | aastik.koshta@flipkart.com |
0504a39bd3a53ce3fb37f9012f1f4a1ee8066d17 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D00A/STATACD00AUN.py | dd635c94e98bc6adaf457cc3c6cccc8173fce158 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 791 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD00AUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 5},
{ID: 'CUX', MIN: 0, MAX: 1},
{ID: 'NAD', MIN: 1, MAX: 99, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
{ID: 'DOC', MIN: 1, MAX: 200000, LEVEL: [
{ID: 'MOA', MIN: 1, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 5},
]},
{ID: 'UNS', MIN: 1, MAX: 1},
{ID: 'MOA', MIN: 1, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
12262c97436aca1a39c2d16fcba4418c422a4ff1 | 9fa547b23ebd17f1508260fe571886e7da564f97 | /lab3_task/cloud_generator.py | 9aa4bec7bf85f005397b709b319eeb26fc7a52a2 | [] | no_license | serhiisad/sc_l3 | 2218292e1cd189729092ecb6106f89618eeda337 | 44ab35fb1ef33bfbd0af9efebb418fa0a9cee49e | refs/heads/master | 2020-03-19T05:32:12.024381 | 2018-06-03T21:36:44 | 2018-06-03T21:36:44 | 135,941,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | from wordcloud import WordCloud
import matplotlib.pyplot as plt
class WordCloudCreator:
#some prescriptions
STOPWORDS = "english"
def create_cloud(word_string):
cloud = WordCloud(font_path="/fonts/Symbola.ttf",
stopwords=STOPWORDS,
background_color='black',
width=1200,
height=1000
).generate(word_string)
plt.imshow(cloud)
plt.axis('off')
plt.show()
return plt
| [
"serhiisad.kpi@gmail.com"
] | serhiisad.kpi@gmail.com |
7808b35e64fd8cce3f6d620a7b484413efedb4a9 | 30cb8fc82cd61fef36e500f7aceaf6cf97c26af2 | /blogengine/blogengine/blogengine/settings.py | 26de304da08ed30bc4d1657aa548455f041f982c | [] | no_license | VladyslavHnatchenko/python_django_blog | c968754e5137ff22ff51e5728ae1741fc1220c45 | 5ab5394e0adcac0371609df11404e0bac7a83b7d | refs/heads/master | 2020-03-30T13:27:27.473440 | 2018-10-04T12:23:58 | 2018-10-04T12:23:58 | 151,273,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,287 | py | """
Django settings for blogengine project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# NOTE(review): the imported STATICFILES_DIRS is shadowed by the assignment
# at the bottom of this file; the import appears to be unnecessary.
from django.conf.global_settings import STATICFILES_DIRS

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'v*=b)@&^sq+uza+d1^b!2y9$rjakxw44qxti+i2f2_(h4f=mj3'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',  # project-local blog application
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'blogengine.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            # Project-level template directory in addition to app templates.
            os.path.join(BASE_DIR, 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'blogengine.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/static/'

STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
| [
"hnatchenko.vladyslav@gmail.com"
] | hnatchenko.vladyslav@gmail.com |
c66beb40cbc2a5bde1cc5a17bba6f3023a3a46c8 | e05f89b57ebb86edddafe87e1cf77afb3ad9a48b | /python/code/logging_examples/app_ini_logging.py | f2f00b3ca7c612f41046c80c651d899dfdd23bd3 | [] | no_license | sylvaus/presentations | 6c7736042576d0f66d88fa41111b87d29138ac32 | 43c1e33e4764dc609d49927f57354626f281fa9e | refs/heads/master | 2021-07-02T03:32:01.303038 | 2020-11-15T21:43:36 | 2020-11-15T21:43:36 | 187,127,340 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | import logging
import logging.config
from logged_library import printer
def main():
    """Configure logging from config.ini and exercise the logged library.

    Demonstrates that handlers/levels configured via fileConfig apply to the
    root logger, a library-level logging function, and a class method.
    """
    logging.config.fileConfig("config.ini")
    logger = logging.getLogger()  # root logger
    logger.debug("root_print")
    printer.log_print_info("log_print")
    printer_class = printer.Printer()
    printer_class.log_class_print_warning("log_print_class")


if __name__ == '__main__':
    main()
| [
"pierreyves.breches74@gmail.com"
] | pierreyves.breches74@gmail.com |
21835f3224579fe2ff6b3757bff70f66dc684ee6 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/-57053121/PyQt5/QtCore/QXmlStreamAttributes.py | 46175e1c688007918b294c66a284da0c6e8e9ef9 | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,417 | py | # encoding: utf-8
# module PyQt5.QtCore
# from C:\Users\Doly\Anaconda3\lib\site-packages\PyQt5\QtCore.pyd
# by generator 1.147
# no doc
# imports
import enum as __enum
import sip as __sip
class QXmlStreamAttributes(__sip.simplewrapper):
    """
    QXmlStreamAttributes()
    QXmlStreamAttributes(QXmlStreamAttributes)
    """
    # Auto-generated IDE stub: method bodies are placeholders (`pass` or a
    # dummy return value) restored from the C extension's __doc__ strings.
    # Do not edit by hand; it carries no runtime behavior of its own.
    def append(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
        """
        append(self, str, str, str)
        append(self, str, str)
        append(self, QXmlStreamAttribute)
        """
        pass

    def at(self, p_int): # real signature unknown; restored from __doc__
        """ at(self, int) -> QXmlStreamAttribute """
        return QXmlStreamAttribute

    def clear(self): # real signature unknown; restored from __doc__
        """ clear(self) """
        pass

    def contains(self, QXmlStreamAttribute): # real signature unknown; restored from __doc__
        """ contains(self, QXmlStreamAttribute) -> bool """
        return False

    def count(self, QXmlStreamAttribute=None): # real signature unknown; restored from __doc__ with multiple overloads
        """
        count(self, QXmlStreamAttribute) -> int
        count(self) -> int
        """
        return 0

    def data(self): # real signature unknown; restored from __doc__
        """ data(self) -> sip.voidptr """
        pass

    def fill(self, QXmlStreamAttribute, size=-1): # real signature unknown; restored from __doc__
        """ fill(self, QXmlStreamAttribute, size: int = -1) """
        pass

    def first(self): # real signature unknown; restored from __doc__
        """ first(self) -> QXmlStreamAttribute """
        return QXmlStreamAttribute

    def hasAttribute(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads
        """
        hasAttribute(self, str) -> bool
        hasAttribute(self, str, str) -> bool
        """
        return False

    def indexOf(self, QXmlStreamAttribute, from_=0): # real signature unknown; restored from __doc__
        """ indexOf(self, QXmlStreamAttribute, from_: int = 0) -> int """
        return 0

    def insert(self, p_int, QXmlStreamAttribute): # real signature unknown; restored from __doc__
        """ insert(self, int, QXmlStreamAttribute) """
        pass

    def isEmpty(self): # real signature unknown; restored from __doc__
        """ isEmpty(self) -> bool """
        return False

    def last(self): # real signature unknown; restored from __doc__
        """ last(self) -> QXmlStreamAttribute """
        return QXmlStreamAttribute

    def lastIndexOf(self, QXmlStreamAttribute, from_=-1): # real signature unknown; restored from __doc__
        """ lastIndexOf(self, QXmlStreamAttribute, from_: int = -1) -> int """
        return 0

    def prepend(self, QXmlStreamAttribute): # real signature unknown; restored from __doc__
        """ prepend(self, QXmlStreamAttribute) """
        pass

    def remove(self, p_int, p_int_1=None): # real signature unknown; restored from __doc__ with multiple overloads
        """
        remove(self, int)
        remove(self, int, int)
        """
        pass

    def replace(self, p_int, QXmlStreamAttribute): # real signature unknown; restored from __doc__
        """ replace(self, int, QXmlStreamAttribute) """
        pass

    def size(self): # real signature unknown; restored from __doc__
        """ size(self) -> int """
        return 0

    def value(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads
        """
        value(self, str, str) -> str
        value(self, str) -> str
        """
        return ""

    def __contains__(self, *args, **kwargs): # real signature unknown
        """ Return key in self. """
        pass

    def __delitem__(self, *args, **kwargs): # real signature unknown
        """ Delete self[key]. """
        pass

    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass

    def __getitem__(self, *args, **kwargs): # real signature unknown
        """ Return self[key]. """
        pass

    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass

    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass

    def __iadd__(self, *args, **kwargs): # real signature unknown
        """ Implement self+=value. """
        pass

    def __init__(self, QXmlStreamAttributes=None): # real signature unknown; restored from __doc__ with multiple overloads
        pass

    def __len__(self, *args, **kwargs): # real signature unknown
        """ Return len(self). """
        pass

    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass

    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass

    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass

    def __setitem__(self, *args, **kwargs): # real signature unknown
        """ Set self[key] to value. """
        pass

    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """list of weak references to the object (if defined)"""

    # Defining __eq__ without __hash__ makes instances unhashable.
    __hash__ = None
| [
"qinkunpeng2015@163.com"
] | qinkunpeng2015@163.com |
90feb22e8c06ee4501a11cf0116368b9dd46e382 | 000e3e3ed05b76c50ad5ed6ee82defb4e84528ce | /Week 5/Quizz 5/ProbabilityWithoutInput/Anharmonic Potential/Approx/matrix_square_anharmonic.py | fb365b070ab960764f7b31cc9d98707ef0ad31a2 | [] | no_license | mattborghi/StatisticalMechanicsComputing | 50eb0a7a5c216dbcab13c4cc43b752447eca424d | 38b8fa821f23e1a71862f581cb1c42c178a8ad4d | refs/heads/master | 2021-01-19T15:19:40.316686 | 2017-08-21T14:32:39 | 2017-08-21T14:32:39 | 100,960,052 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,840 | py | import math, numpy
import matplotlib.pyplot as plt
#Anharmonic potential
def V(x, cubic, quartic):
    """Anharmonic potential: harmonic well plus cubic and quartic terms."""
    harmonic = x ** 2.0 / 2.0
    anharmonic = cubic * x ** 3 + quartic * x ** 4
    return harmonic + anharmonic
# Free off-diagonal density matrix
def rho_free(x, xp, beta):
    """Free-particle density-matrix element between positions x and xp at
    inverse temperature beta (a normalized Gaussian in x - xp)."""
    norm = math.sqrt(2.0 * math.pi * beta)
    gauss = math.exp(-(x - xp) ** 2 / (2.0 * beta))
    return gauss / norm
# Anharmonic density matrix in the Trotter approximation (returns the full matrix)
#rho(x,x',beta) = exp(-beta V(x) / 2) rho_free(x, x', beta) exp(-beta V(x') / 2).
def rho_anharmonic_trotter(grid, beta, cubic, quartic):
    """Anharmonic-oscillator density matrix in the Trotter approximation:
    rho(x, x') = exp(-beta V(x)/2) * rho_free(x, x', beta) * exp(-beta V(x')/2),
    returned as a full (len(grid) x len(grid)) numpy array."""
    rows = []
    for xp in grid:
        row = []
        for x in grid:
            weight = numpy.exp(-0.5 * beta * (V(x, cubic, quartic) +
                                              V(xp, cubic, quartic)))
            row.append(rho_free(x, xp, beta) * weight)
        rows.append(row)
    return numpy.array(rows)
def pi_quant(input_val, beta):
    """Exact quantum harmonic-oscillator position distribution at inverse
    temperature beta, evaluated at each point of *input_val*."""
    th = math.tanh(beta / 2.0)
    prefactor = math.sqrt(th / math.pi)
    values = []
    for x in input_val:
        values.append(prefactor * math.exp(-x ** 2 * th))
    return values
#Energy perturbation and Partition function
def Energy_pert(n, cubic, quartic):
    """First-order perturbative energy of oscillator level *n* for the given
    cubic and quartic couplings (harmonic n + 1/2 plus the two shifts)."""
    base = n + 0.5
    cubic_shift = 15.0 / 4.0 * cubic ** 2.0 * (n ** 2.0 + n + 11.0 / 30.0)
    quartic_shift = 3.0 / 2.0 * quartic * (n ** 2.0 + n + 1.0 / 2.0)
    return base - cubic_shift + quartic_shift
def Z_pert(cubic, quartic, beta, n_max):
    """Partition function built from the perturbative spectrum, truncating
    the sum at level *n_max* (inclusive)."""
    boltzmann_weights = (math.exp(-beta * Energy_pert(n, cubic, quartic))
                         for n in range(n_max + 1))
    return sum(boltzmann_weights)
# Couplings to scan; each cubic coefficient is the negated quartic one.
# NOTE: this is Python 2 code (print statements and integer division below).
quartic = [0.001,0.01,0.1,0.2,0.3,0.4]#,0.5
cubic = [-x for x in quartic]
# Discretized position grid on [-x_max, x_max].
x_max = 5.0
nx = 100
dx = 2.0 * x_max / (nx - 1)
x = [i * dx for i in range(-(nx - 1) / 2, nx / 2 + 1)]
Z_pp = []  # perturbative partition functions, one per coupling
Z = []     # matrix-squaring partition functions, one per coupling
for ind in range(len(quartic)):
    beta_tmp = 2.0 ** (-5) # initial value of beta (power of 2)
    beta = 2.0 # actual value of beta (power of 2)
    rho = rho_anharmonic_trotter(x, beta_tmp,cubic[ind],quartic[ind]) # density matrix at initial beta
    # Repeated matrix squaring doubles beta until the target is reached.
    while beta_tmp < beta:
        rho = numpy.dot(rho, rho)
        rho *= dx
        beta_tmp *= 2.0
        print 'beta: %s -> %s' % (beta_tmp / 2.0, beta_tmp)
    Z_pp.append( Z_pert(cubic[ind],quartic[ind],beta_tmp,nx) )
    # Z = Tr(rho) * dx.
    # NOTE(review): range(nx + 1) indexes one element past the grid used to
    # build rho -- looks like an off-by-one; confirm against the grid size.
    Z.append( sum(rho[j, j] for j in range(nx + 1)) * dx )
    #pi_of_x = [rho[j, j] / Z[ind] for j in range(nx + 1)]
# Write one "perturbative matrix-squaring" pair per coupling value.
f = open('part_function_data_anharm_matrixsquaring_beta' + str(beta) + '.dat', 'w')
for j in range(len(quartic)):
    #f.write(str(x[j]) + ' ' + str(rho[j, j] / Z) + '\n')
    f.write(str(Z_pp[j]) + ' ' + str(Z[j]) + '\n')
f.close()
#plt.figure(figsize=(20,10))
#plt.plot(x,pi_of_x,color="red",label="pi(x)",linewidth=4.0)
#plt.plot(x,pi_quant(x,beta_tmp),'ob',label="pi_quant(x)",linewidth=2.0)
#plt.legend()
#plt.title('SAnHO Particle Positions \n Matrix Square Harmonic\n $beta$ = %0.2f \n $Temp$ = %0.2f'% (beta_tmp,1/float(beta_tmp)) )
#plt.xlabel('$<Positions>$', fontsize=14)
#plt.ylabel('$Frequency$', fontsize=14)
#plt.grid()
#plt.show()
#plt.savefig('sho_matrix_square_beta%0.2f.png'%beta_tmp) | [
"borghi.matias@gmail.com"
] | borghi.matias@gmail.com |
08d5af84367be41f2bbf4b8649362fce919addc8 | c9f67529e10eb85195126cfa9ada2e80a834d373 | /lib/python3.5/site-packages/torch/legacy/nn/SmoothL1Criterion.py | 8b84ef6d94b2278e8e4568b6f609acc2bafc7a61 | [
"Apache-2.0"
] | permissive | chilung/dllab-5-1-ngraph | 10d6df73ea421bfaf998e73e514972d0cbe5be13 | 2af28db42d9dc2586396b6f38d02977cac0902a6 | refs/heads/master | 2022-12-17T19:14:46.848661 | 2019-01-14T12:27:07 | 2019-01-14T12:27:07 | 165,513,937 | 0 | 1 | Apache-2.0 | 2022-12-08T04:59:31 | 2019-01-13T14:19:16 | Python | UTF-8 | Python | false | false | 1,093 | py | import torch
from .Criterion import Criterion
class SmoothL1Criterion(Criterion):
    """Legacy-nn Smooth L1 (Huber) loss criterion.

    Thin wrapper dispatching to the C backend kernels; ``sizeAverage``
    selects averaged vs. summed reduction.
    """

    def __init__(self, sizeAverage=True):
        super(SmoothL1Criterion, self).__init__()
        self.sizeAverage = sizeAverage
        # Scalar output buffer, lazily allocated on the first forward pass.
        self.output_tensor = None

    def updateOutput(self, input, target):
        """Compute the (reduced) loss of *input* against *target*."""
        if self.output_tensor is None:
            # Allocate with the same type/device as the input tensor.
            self.output_tensor = input.new(1)
        self._backend.SmoothL1Criterion_updateOutput(
            self._backend.library_state,
            input,
            target,
            self.output_tensor,
            self.sizeAverage,
            True,  # reduce
        )
        self.output = self.output_tensor[0].item()
        return self.output

    def updateGradInput(self, input, target):
        """Compute d(loss)/d(input) into self.gradInput and return it."""
        # Backward pass with an implicit all-ones upstream gradient.
        implicit_gradOutput = torch.ones(1).type_as(input)
        self._backend.SmoothL1Criterion_updateGradInput(
            self._backend.library_state,
            input,
            target,
            implicit_gradOutput,
            self.gradInput,
            self.sizeAverage,
            True,  # reduce
        )
        return self.gradInput
| [
"chilung.cs06g@nctu.edu.tw"
] | chilung.cs06g@nctu.edu.tw |
10737f8669e1c5b70375d5b52822908ff4f8d2d8 | 4e0ee2b68398a90b0986975f645350033a624558 | /tests/matrix_conv2d/test_matrix_conv2d_int32_3x3_stride1_concur_och16.py | e8a2724030fce34ffd99015257685748fc20d6a8 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kindsenior/nngen | 697b80b32cf2b33e7f2c64e4d1a27eb2d739b30c | 301b19b35e50174d8abb1a757b061ae80cdfe612 | refs/heads/master | 2022-09-21T05:53:34.565461 | 2020-05-03T14:58:19 | 2020-05-03T14:58:19 | 269,007,213 | 0 | 0 | Apache-2.0 | 2020-06-03T06:26:43 | 2020-06-03T06:26:42 | null | UTF-8 | Python | false | false | 2,835 | py | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
import nngen as ng
import veriloggen
import matrix_conv2d
# Benchmark parameters below are forwarded positionally to
# matrix_conv2d.run() by both test() and the __main__ block.
act_shape = (1, 7, 7, 15)
weight_shape = (7, 3, 3, 15)
bias_shape = None
scale_shape = None
act_dtype = ng.int32
weight_dtype = ng.int32
bias_dtype = ng.int32
scale_dtype = ng.int32
out_dtype = ng.int32
stride = (1, 1, 1, 1)
# Shift amounts and activation function disabled in this variant.
rshift_mul = None
rshift_sum = None
rshift_out = None
act_func = None
par_ich = 1
par_och = 1
par_col = 1
par_row = 1
concur_och = 16
stationary = 'filter'
# RAM sizes left as None -- presumably generator defaults; confirm in
# matrix_conv2d.run.
input_ram_size = None
filter_ram_size = None
bias_ram_size = None
scale_ram_size = None
out_ram_size = None
axi_datawidth = 32
def test(request, silent=True):
    """pytest entry point: build and simulate the design, then assert the
    simulator log ends with the PASS marker."""
    # Each test must start from a clean Veriloggen namespace.
    veriloggen.reset()
    # Simulator backend chosen via the pytest --sim command-line option.
    simtype = request.config.getoption('--sim')
    rslt = matrix_conv2d.run(act_shape, weight_shape,
                             bias_shape, scale_shape,
                             act_dtype, weight_dtype,
                             bias_dtype, scale_dtype,
                             out_dtype,
                             stride,
                             rshift_mul, rshift_sum, rshift_out,
                             act_func,
                             par_ich, par_och, par_col, par_row,
                             concur_och, stationary,
                             input_ram_size, filter_ram_size,
                             bias_ram_size, scale_ram_size,
                             out_ram_size,
                             axi_datawidth, silent,
                             filename=None, simtype=simtype,
                             outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
    # The last line of the simulation log carries the verification verdict.
    verify_rslt = rslt.splitlines()[-1]
    assert(verify_rslt == '# verify: PASSED')
if __name__ == '__main__':
    # Standalone run: emit the generated Verilog to tmp.v and print the
    # full simulation log instead of asserting on it.
    rslt = matrix_conv2d.run(act_shape, weight_shape,
                             bias_shape, scale_shape,
                             act_dtype, weight_dtype,
                             bias_dtype, scale_dtype,
                             out_dtype,
                             stride,
                             rshift_mul, rshift_sum, rshift_out,
                             act_func,
                             par_ich, par_och, par_col, par_row,
                             concur_och, stationary,
                             input_ram_size, filter_ram_size,
                             bias_ram_size, scale_ram_size,
                             out_ram_size,
                             axi_datawidth, silent=False,
                             filename='tmp.v',
                             outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
    print(rslt)
"shta.ky1018@gmail.com"
] | shta.ky1018@gmail.com |
ddb778476d3c9d579c8c1b78b74e7face47b21fc | 536bbba61e839d1c3299d1df28b4c630d3e0670a | /series-dataframe.py | 98ccb228bf36ce1f94929ba696222f06aa48a7b2 | [] | no_license | nearxu/pandas-exercise | 6af6a679fd49575c1fc3a29c31013b00f614fb49 | 1bd9d5e7ba7db4caac0df0ff7aca20873a29548b | refs/heads/master | 2020-04-05T07:50:37.984283 | 2018-11-09T10:45:11 | 2018-11-09T10:45:11 | 156,690,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | import pandas as pd
import numpy as np  # imported but unused in this example

# Series: 1-D labelled array.
data = [1, 2, 3]
index = ['a', 'b', 'c']
s = pd.Series(data=data, index=index, name='sss')
# dataFrame
# DataFrame: 2-D labelled table built from nested lists.
data = [[1, 2, 3],
        [4, 5, 6]]
index = ['a', 'b']
columns = ['A', 'B', 'C']
df = pd.DataFrame(data=data, index=index, columns=columns)
# Load the sample tips dataset from disk and show its first rows.
tips = pd.read_csv('data/tips.csv')
print(tips.head())
| [
"2448895924@qq.com"
] | 2448895924@qq.com |
5e85f96660a91e8f43b08fca1f653db3bb7bf855 | 51f438e14dbd2ee48765abaa53364f519300face | /benchmarks/utils/backend.py | 12251151ba2ad2bc88de11429492556fbfb1f6eb | [
"MIT"
] | permissive | chainer/chainer-benchmark | af946ce8809df395378176c89c7de5686426ba97 | 8d0c8f5052b5e2a85ad522ff48899ffc9a2bfafb | refs/heads/master | 2021-04-09T11:50:13.829938 | 2018-08-30T08:49:48 | 2018-08-30T08:49:48 | 125,476,025 | 9 | 2 | MIT | 2019-03-20T11:06:11 | 2018-03-16T06:53:57 | Python | UTF-8 | Python | false | false | 6,592 | py | from functools import wraps
import inspect
import os
import warnings
import chainer
import cupy
import numpy
from benchmarks.utils.helper import _is_func
from benchmarks.utils.helper import parameterize
from benchmarks.utils.helper import sync
# All backend variations this benchmark suite understands.
_backend_modes = [
    # GPU (with use_cudnn == 'never')
    'gpu',
    # GPU (with use_cudnn == 'auto')
    'gpu-cudnn',
    # CPU (with use_ideep == 'never')
    'cpu',
    # CPU (with use_ideep == 'auto')
    'cpu-ideep',
]

# Subset of modes to actually run, overridable via the comma-separated
# CHAINER_BENCHMARK_BACKENDS environment variable.
_enabled_backend_modes = (
    os.environ['CHAINER_BENCHMARK_BACKENDS'].split(',')
    if 'CHAINER_BENCHMARK_BACKENDS' in os.environ
    else _backend_modes
)

# Fail fast on a typo in the environment variable.
assert all([x in _backend_modes for x in _enabled_backend_modes])
def backends(*modes):
    """Class decorator to parameterize the benchmark class with backends.

    This is a special form of :func:`parameterize` to parameterize the
    backend variation. For all `time_*` functions and `setup` function
    in the class, this decorator:

    * wraps the function to be called with the Chainer configuration
      (`use_cudnn` and `use_ideep`) set to the current backend variation.
    * wraps the function to perform CPU/GPU synchronization after the
      benchmark, when the current backend variation uses GPU. The time
      taken for synchronization is counted as a elapsed time in the benchmark.
    * injects the array module (`cupy` or `numpy` depending on the current
      variation) as `self.xp` so that benchmark code can use it to work with
      array modules with each backend.
    * provides access to `is_backend_gpu()` and `is_backend_ideep()` methods
      so that benchmark code can use it to change behavior depending on the
      backend variation (e.g., `if is_backend_gpu(): model.to_gpu()`).

    This decorator adds parameter axis with the name of `backend`.

    Note that `cpu-ideep` mode will automatically be skipped if the current
    benchmark setup does not support it, e.g., when running benchmark
    against older Chainer version that does not support iDeep.

    You cannot apply `parameterize` decorator to the class already decorated
    by this decorator. If you want to use `parameterize` along with this
    decorator, make `parameterize` the most inner (i.e., the closest to the
    class declaration) decorator.

    Example of usage is as follows:

    >>> @backend('gpu', 'gpu-cudnn', 'cpu', 'cpu-ideep')
    ... class ConvolutionBenchmark(object):
    ...     def time_benchmark(self):
    ...         ...

    You can temporarily limit the backend variation by setting list of
    comma-separated backend names to CHAINER_BENCHMARK_BACKENDS environment
    variable. For example, ``CHAINER_BENCHMARK_BACKENDS=gpu-cudnn,cpu-ideep``
    can be used to skip running benchmark for ``gpu`` and ``cpu``.
    """
    # Reject unknown backend names at decoration time.
    assert all([m in _backend_modes for m in modes])

    def _wrap_class(klass):
        # Only classes can be decorated (asv benchmark classes).
        assert isinstance(klass, type)
        return _inject_backend_mode(klass, modes)
    return _wrap_class
def _inject_backend_mode(klass, modes):
    """Add a ``backend`` parameter axis to *klass* and wrap its ``setup``
    and ``time_*`` methods so each call runs under the requested backend."""
    klass = parameterize([('backend', modes)])(klass)
    # `setup` method is mandatory to inject backends to skip axis.
    if not hasattr(klass, 'setup'):
        def _setup(self, *args, **kwargs):
            pass
        klass.setup = _setup
    members = inspect.getmembers(klass, predicate=_is_func)
    for (name, func) in members:
        if not (name == 'setup' or name.startswith('time_')):
            continue

        # _wrap_func is applied immediately below, so the loop variable
        # `func` is bound per-iteration (no late-binding closure issue).
        def _wrap_func(f):
            @wraps(f)
            def _wrapped_func(self, backend, *args, **kwargs):
                _benchmark_backend_gpu = False
                _benchmark_backend_ideep = False
                xp = numpy
                use_cudnn = 'never'
                use_ideep = 'never'
                target = f
                if backend not in _enabled_backend_modes:
                    # Raise in `setup` to skip this parameter axis.
                    warnings.warn('Backend disabled: {}'.format(backend))
                    raise NotImplementedError
                elif backend.startswith('gpu'):
                    xp = cupy
                    _benchmark_backend_gpu = True
                    # Synchronize with the device after the benchmark body.
                    target = sync(target)
                    if 'cudnn' in backend:
                        use_cudnn = 'auto'
                elif 'ideep' in backend:
                    if not have_ideep():
                        # Raise in `setup` to skip this parameter axis.
                        warnings.warn('iDeep is unavailable')
                        raise NotImplementedError
                    use_ideep = 'auto'
                    _benchmark_backend_ideep = True
                with _BackendConfig({
                        'use_cudnn': use_cudnn,
                        'use_ideep': use_ideep,
                        '_benchmark_backend_gpu': _benchmark_backend_gpu,
                        '_benchmark_backend_ideep': _benchmark_backend_ideep,
                }):
                    # Inject self.xp
                    assert not hasattr(self, 'xp')
                    setattr(self, 'xp', xp)
                    target(self, *args, **kwargs)
                    delattr(self, 'xp')
            return _wrapped_func
        setattr(klass, name, _wrap_func(func))
    return klass
class _BackendConfig(object):
    """Enter/exit a set of ``chainer.using_config`` entries as one unit."""

    def __init__(self, params):
        self._params = params
        self._contexts = []

    def __enter__(self):
        # Create every context manager first, then enter them all.
        contexts = []
        for key, value in self._params.items():
            contexts.append(chainer.using_config(key, value))
        self._contexts = contexts
        for ctx in self._contexts:
            ctx.__enter__()
        return self

    def __exit__(self, typ, value, traceback):
        # Unwind in reverse order of entry.
        for ctx in reversed(self._contexts):
            ctx.__exit__(typ, value, traceback)
def is_backend_gpu():
    """Returns True if the current backend is GPU."""
    # Flag installed into chainer.config by _BackendConfig for the duration
    # of each wrapped benchmark call.
    return chainer.config._benchmark_backend_gpu
def is_backend_ideep():
    """Returns True if the current backend is iDeep."""
    # Flag installed into chainer.config by _BackendConfig for the duration
    # of each wrapped benchmark call.
    return chainer.config._benchmark_backend_ideep
def have_ideep():
    """Tests if iDeep can be used in the current benchmark configuration.

    If you intend to write benchmark for iDeep outside of `backend` decorator,
    first make sure that iDeep is available using this function.
    This makes possible to run the same benchmark code over past versions of
    Chainer (prior to iDeep support).
    """
    try:
        # The intel64 backend module does not exist in older Chainer.
        import chainer.backends.intel64
    except ImportError:
        return False
    return chainer.backends.intel64.is_ideep_available()
| [
"webmaster@kenichimaehashi.com"
] | webmaster@kenichimaehashi.com |
a733a83e1059e48a75f844145f9800e588e978bc | 079c14bcf35d886ec38834a428d8147bbf3d8dea | /Leetcode_30day_challenge/June_Challenge_2021/Day-15_Matchsticks_to_Square.py | cb70a51d1826c593653a9109f00409370c46601d | [] | no_license | yash-markad/Python-Competitive-Programming | 882666a17dfd93346ed5f75609e5d9e8ad36ac46 | 7fc5d32115bf80f7210f591a2de9a53adc297d98 | refs/heads/master | 2023-05-28T12:00:45.892633 | 2021-06-16T11:13:02 | 2021-06-16T11:13:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py |
class Solution:
    def makesquare(self, matchsticks: List[int]) -> bool:
        """Return True if all matchsticks can be used, each exactly once, to
        form a square (i.e. partition into 4 groups of equal sum).

        Backtracking over the 4 sides, placing the longest sticks first.
        Sorts *matchsticks* in place (as the original did).
        """
        k = 4
        total = sum(matchsticks)
        # Early exits the original lacked: no sticks, or a total length not
        # divisible by 4, cannot form a square (the old code recursed over
        # the whole search space first, and returned True for []).
        if not matchsticks or total % k != 0:
            return False
        side = total // k
        # Longest-first ordering prunes the search dramatically.
        matchsticks.sort(reverse=True)
        if matchsticks[0] > side:
            return False
        sums = [0] * k

        def solve(i: int) -> bool:
            # All sticks placed: each side holds <= `side` and the four
            # sides total 4*side, so every side is exactly `side`.
            if i == len(matchsticks):
                return True
            stick = matchsticks[i]
            for j in range(k):
                if sums[j] + stick <= side:
                    sums[j] += stick
                    if solve(i + 1):
                        return True
                    sums[j] -= stick
                # If one empty side failed, the remaining (identical) empty
                # sides will fail too -- stop trying them.
                if sums[j] == 0:
                    break
            return False

        return solve(0)
| [
"noreply@github.com"
] | yash-markad.noreply@github.com |
ff2d7457b3eb03a65c96f149e9e98e8e0dc6e498 | 73de523bde0c9e8398c63a924b44aadc46d11202 | /test/test_event_events_extended.py | a368347b2cbcc2aad2c37dbc42689cc3f9fca78e | [
"MIT"
] | permissive | Feyd-Aran/isilon_sdk_python | 1c2fae306c1a95a99024dd13dc0fc3b120f9c1de | 24e85a5577d15ac3db06862d07d5a261658c67b7 | refs/heads/v8.0.0 | 2020-09-23T00:16:36.684270 | 2019-12-02T13:45:12 | 2019-12-02T13:45:12 | 225,351,700 | 0 | 0 | MIT | 2019-12-02T10:51:54 | 2019-12-02T10:51:53 | null | UTF-8 | Python | false | false | 928 | py | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 2
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_7_2
from isi_sdk_7_2.models.event_events_extended import EventEventsExtended # noqa: E501
from isi_sdk_7_2.rest import ApiException
class TestEventEventsExtended(unittest.TestCase):
    """EventEventsExtended unit test stubs"""
    # Swagger-codegen-generated scaffold; the actual assertions are still
    # to be written (see the FIXME below).

    def setUp(self):
        # No fixtures required by this generated stub.
        pass

    def tearDown(self):
        pass

    def testEventEventsExtended(self):
        """Test EventEventsExtended"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_7_2.models.event_events_extended.EventEventsExtended()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"brandonfkrueger@gmail.com"
] | brandonfkrueger@gmail.com |
2ad26753b396688aab0f073a1e410eb122edde09 | ab0315bcded75c10c591076b22ed8ff664ee76af | /fig4/8mods_round4_0919/config_scf_8mods_data_freeze_190917_sub5_1_1.py | 34c4c928675525a52ac356f5291189bb55c948e2 | [] | no_license | mukamel-lab/BICCN-Mouse-MOp | 389f62492986a2ffe4278ed16f59fc17dc75b767 | 8058ab8ae827c6e019fff719903b0ba5b400931d | refs/heads/master | 2021-07-06T11:14:25.401628 | 2020-09-30T04:54:27 | 2020-09-30T04:54:27 | 189,758,115 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | #!/usr/bin/env python3
"""An example configuration file
"""
import sys
sys.path.insert(0, '/cndd/fangming/CEMBA/snmcseq_dev')
import os
import snmcseq_utils
# # Configs
name = 'mop_8mods_0915_k30_sub5-1-1'
outdir = '/cndd/fangming/CEMBA/data/MOp_all/results'
output_pcX_all = outdir + '/pcX_all_{}.npy'.format(name)
output_cells_all = outdir + '/cells_all_{}.npy'.format(name)
output_imputed_data_format = outdir + '/imputed_data_{}_{{}}.npy'.format(name)
output_clst_and_umap = outdir + '/intg_summary_{}.tsv'.format(name)
output_figures = outdir + '/figures/{}_{{}}.{{}}'.format(name)
output_cluster_centroids = outdir + '/centroids_{}.pkl'.format(name)
DATA_DIR = '/cndd/fangming/CEMBA/data/MOp_all/data_freeze_neurons_subtypes_8mods_round4/sub5-1-1'
# fixed dataset configs
sys.path.insert(0, DATA_DIR)
from __init__datasets import *
meta_f = os.path.join(DATA_DIR, '{0}_metadata.tsv')
hvftrs_f = os.path.join(DATA_DIR, '{0}_hvfeatures.{1}')
hvftrs_gene = os.path.join(DATA_DIR, '{0}_hvfeatures.gene')
hvftrs_cell = os.path.join(DATA_DIR, '{0}_hvfeatures.cell')
# mods_selected = [
# 'snmcseq_gene',
# 'snatac_gene',
# 'smarter_cells',
# 'smarter_nuclei',
# '10x_cells_v2',
# '10x_cells_v3',
# '10x_nuclei_v3',
# '10x_nuclei_v3_macosko',
# ]
mods_selected = snmcseq_utils.import_single_textcol(os.path.join(DATA_DIR, 'datasets.txt'))
print(mods_selected)
features_selected = ['10x_cells_v2']
# check features
for features_modality in features_selected:
assert (features_modality in mods_selected)
# within modality
ps = {'mc': 0.9,
'atac': 0.1,
'rna': 0.7,
}
drop_npcs = {
'mc': 0,
'atac': 0,
'rna': 0,
}
# across modality
cross_mod_distance_measure = 'correlation' # cca
knn = 20
relaxation = 3
n_cca = 30
# PCA
npc = 50
# clustering
k = 30
resolutions = [0.1, 0.2, 0.5, 1,]
# umap
umap_neighbors = 30
min_dist = 0.5
| [
"fmxie1993@gmail.com"
] | fmxie1993@gmail.com |
1b151df5996d4076f15414d795cfa3a84fb2f0fb | 245b0329360b18c32510a6d13b2650fd6ca752cc | /ch03/bollingerbands.py | a0d6b4d3938e530e8988966fc965b098f922b4f2 | [] | no_license | TanUkkii007/numpy-begginers-guide | 56c315d207f681bd4e6d70abeac82bfc0db2bad5 | 6d483bc8672947a06d4240c4379f00183da46d8b | refs/heads/master | 2021-01-17T08:08:07.113571 | 2016-06-29T16:45:59 | 2016-06-29T16:45:59 | 61,984,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | import numpy as np
import matplotlib.pyplot as plt

# Window length for the simple moving average and the deviation windows.
N = 5
weights = np.ones(N)/N
print("Weights", weights)

# Column 6 of data.csv holds the close prices.
c = np.loadtxt('data.csv', delimiter=',', usecols=(6,), unpack=True)
# Keep only the "full overlap" part of the convolution: one SMA per window.
sma = np.convolve(weights, c)[N-1:-N+1]
deviation = []
C = len(c)

for i in range(N-1, C):
    if i + N < C:
        dev = c[i:i+N]
    else:
        # Near the end of the series, fall back to the last full window.
        dev = c[-N:]
    averages = np.zeros(N)
    # NOTE(review): sma[i - N - 1] looks off by two relative to the window
    # starting index (i - N + 1 would align the SMA with the window);
    # confirm against the source this example was transcribed from.
    averages.fill(sma[i-N-1])
    dev = dev - averages
    dev = dev ** 2
    dev = np.sqrt(np.mean(dev))
    deviation.append(dev)

# The bands are two standard deviations around the moving average.
deviation = 2 * np.array(deviation)
print("len(deviation), len(sma)", len(deviation), len(sma))
upperBB = sma + deviation
lowerBB = sma - deviation

c_slice = c[N-1:]
between_bands = np.where((c_slice < upperBB) & (c_slice > lowerBB))
print("lowerBB[between_bands]", lowerBB[between_bands])
print("c[between_bands]", c[between_bands])
print("upperBB[between_bands]", upperBB[between_bands])
# Fraction of prices falling strictly between the two bands.
between_bands = len(np.ravel(between_bands))
print("Ratio between bands", float(between_bands)/len(c_slice))

t = np.arange(N-1, C)
plt.plot(t, c_slice, lw=1.0, label='Data')
plt.plot(t, sma, '--', lw=2.0, label='Moving Average')
plt.plot(t, upperBB, '-.', lw=3.0, label='Upper Band')
plt.plot(t, lowerBB, ':', lw=4.0, label='Lower Band')
plt.title('Bollinger Bands')
plt.xlabel('Days')
plt.ylabel('Price ($)')
plt.grid()
plt.legend()
plt.show() | [
"yusuke.007.yasud@gmail.com"
] | yusuke.007.yasud@gmail.com |
d741f4dc2e31ea18208407ad1055cf75e11168f7 | b72dbc51279d3e59cb6410367b671f8a956314c1 | /leet_code/leet_221.py | 9b779dda89d3fccc750ce8919c485c685a4e267e | [] | no_license | ddobokki/coding-test-practice | 7b16d20403bb1714d97adfd1f47aa7d3ccd7ea4b | c88d981a1d43b986169f7884ff3ef1498e768fc8 | refs/heads/main | 2023-07-08T15:09:32.269059 | 2021-08-08T12:19:44 | 2021-08-08T12:19:44 | 344,116,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | from typing import List
class Solution:
    def maximalSquare(self, matrix: List[List[str]]) -> int:
        """Return the area of the largest square made entirely of '1' cells.

        Dynamic programming with a rolling 1-D array: dp[j] is the side
        length of the largest all-'1' square whose bottom-right corner is
        the current cell. Unlike the original implementation, this version
        does not mutate the input matrix, prints no debug output, and
        returns 0 (not 1) for an empty matrix.
        """
        if not matrix or not matrix[0]:
            return 0
        cols = len(matrix[0])
        dp = [0] * cols
        best = 0
        for i, row in enumerate(matrix):
            prev = 0  # dp value of the top-left neighbour (previous row, j-1)
            for j, cell in enumerate(row):
                top = dp[j]  # dp value of the cell directly above
                if cell == '1':
                    if i == 0 or j == 0:
                        dp[j] = 1
                    else:
                        dp[j] = min(top, dp[j - 1], prev) + 1
                    best = max(best, dp[j])
                else:
                    dp[j] = 0
                prev = top
        return best * best
Solution().maximalSquare([["1","1","1","1","0"],["1","1","1","1","0"],["1","1","1","1","1"],["1","1","1","1","1"],["0","0","1","1","1"]]) | [
"44228269+ddobokki@users.noreply.github.com"
] | 44228269+ddobokki@users.noreply.github.com |
2edb1344e87bae4019dff4d49c122f342bec658b | a3eb732ead7e1d10a85a88e42dc639eb16a40265 | /instagram_api/response/delete_comment.py | 1efaa064594ea87f89a874f1ebaa931c0ca1f435 | [
"MIT"
] | permissive | carsam2021/instagram_api | 7654c0f485c22935cf478016e46e65acbeda9344 | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | refs/heads/master | 2023-03-16T14:06:27.515432 | 2020-10-17T04:39:19 | 2020-10-17T04:39:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | from .mapper import ApiResponse, ApiResponseInterface
from .mapper.types import Timestamp, AnyType  # imported but unused in this module

# Public API of this module.
__all__ = ['DeleteCommentResponse']


class DeleteCommentResponseInterface(ApiResponseInterface):
    """Field contract for the delete-comment endpoint response; adds no
    fields beyond the base interface."""
    pass


class DeleteCommentResponse(ApiResponse, DeleteCommentResponseInterface):
    """Concrete response object returned after deleting a comment."""
    pass
| [
"root@proscript.ru"
] | root@proscript.ru |
ef03a9fb1a196c9931d9f5f605aefff2c0fb7864 | 78166a623dce723406b6493990c1b23e0d6eec7b | /Sugar/Loss/LossLib/VGG_UNet.py | 663accc1b0233bf4b3b0aea7b2312be6d24b1d4e | [] | no_license | codybai/simple | c4f2cb1e6000d3932be825384d334b2ce0d42b7c | ca46fdf044f19ecbbb8f85d8a162c6777c6637b6 | refs/heads/master | 2020-03-28T00:05:27.281610 | 2018-09-04T16:31:22 | 2018-09-04T16:31:22 | 147,374,685 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,957 | py | import torch.nn as nn
from Model.CreateModel import CreateModel
from Model.DIYLayer.VersaLayer import MinMaxNet
class VGG_UNet(nn.Module):
    """U-Net-style head: a backbone built from *model_str* with encoder
    features blended in at the layer indices listed in *cat_str*."""

    def __init__(self, model_str, cat_str):
        super(VGG_UNet, self).__init__()
        builder = CreateModel()
        self.model = builder.Create(model_str)
        # cat_str is a comma-separated list of layer indices at which a skip
        # feature from the encoder is mixed into the running activation.
        self.cat_index = [] if cat_str is None else [int(tok) for tok in cat_str.split(',')]

    def forward(self, feature_list, alpha=0.5):
        """feature_list[0] is the input; feature_list[1:] are skip features,
        consumed in order at the indices in self.cat_index."""
        out = feature_list[0]
        skips = iter(feature_list[1:])
        for idx, layer in enumerate(self.model):
            out = layer(out)
            if idx in self.cat_index:
                out = alpha * next(skips) + out * (1 - alpha)
        return out
class VGG_mtmask6666(nn.Module):
    """Mask head: backbone built from *model_str*, sigmoid on the output."""

    def __init__(self, model_str, cat_str):
        super(VGG_mtmask6666, self).__init__()
        builder = CreateModel()
        self.model = builder.Create(model_str)
        # tanh is registered but unused in forward; kept so the module's
        # state_dict/structure stays identical to the original.
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
        # Kept for API parity with VGG_UNet; forward() does not use it.
        self.cat_index = [] if cat_str is None else [int(tok) for tok in cat_str.split(',')]

    def forward(self, x):
        return self.sigmoid(self.model(x))
class VGG_mtmask_minmax(nn.Module):
    """Mask head whose backbone output goes through a 2-channel MinMaxNet;
    the channel difference is squashed to (0, 1) with a sigmoid."""

    def __init__(self, model_str, cat_str):
        super(VGG_mtmask_minmax, self).__init__()
        builder = CreateModel()
        self.model = builder.Create(model_str)
        # tanh/conv are registered but unused in forward; kept so the
        # module's parameter set matches the original exactly.
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
        self.cat_index = [] if cat_str is None else [int(tok) for tok in cat_str.split(',')]
        self.minmax = MinMaxNet(None, 2)
        self.conv = nn.Conv2d(2, 1, 3, 1, 1)

    def forward(self, x):
        features = self.minmax(self.model(x))
        # channel 0 minus channel 1, then sigmoid
        diff = features[:, 0:1] - features[:, 1:]
        return self.sigmoid(diff)
"codybai@163.com"
] | codybai@163.com |
7c836c73e3c1eb7c760cd77bfe4ae3a749e76df2 | a9c3db07c29a46baf4f88afe555564ed0d8dbf2e | /src/0827-expressive-words/expressive-words.py | 548f716bd6f863d15d4dc652abadcdc9118db5fd | [] | no_license | HLNN/leetcode | 86d2f5b390be9edfceadd55f68d94c78bc8b7644 | 35010d67341e6038ae4ddffb4beba4a9dba05d2a | refs/heads/master | 2023-03-13T16:44:58.901326 | 2023-03-03T00:01:05 | 2023-03-03T00:01:05 | 165,402,662 | 6 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,949 | py | # Sometimes people repeat letters to represent extra feeling. For example:
#
#
# "hello" -> "heeellooo"
# "hi" -> "hiiii"
#
#
# In these strings like "heeellooo", we have groups of adjacent letters that are all the same: "h", "eee", "ll", "ooo".
#
# You are given a string s and an array of query strings words. A query word is stretchy if it can be made to be equal to s by any number of applications of the following extension operation: choose a group consisting of characters c, and add some number of characters c to the group so that the size of the group is three or more.
#
#
# For example, starting with "hello", we could do an extension on the group "o" to get "hellooo", but we cannot get "helloo" since the group "oo" has a size less than three. Also, we could do another extension like "ll" -> "lllll" to get "helllllooo". If s = "helllllooo", then the query word "hello" would be stretchy because of these two extension operations: query = "hello" -> "hellooo" -> "helllllooo" = s.
#
#
# Return the number of query strings that are stretchy.
#
#
# Example 1:
#
#
# Input: s = "heeellooo", words = ["hello", "hi", "helo"]
# Output: 1
# Explanation:
# We can extend "e" and "o" in the word "hello" to get "heeellooo".
# We can't extend "helo" to get "heeellooo" because the group "ll" is not size 3 or more.
#
#
# Example 2:
#
#
# Input: s = "zzzzzyyyyy", words = ["zzyy","zy","zyy"]
# Output: 3
#
#
#
# Constraints:
#
#
# 1 <= s.length, words.length <= 100
# 1 <= words[i].length <= 100
# s and words[i] consist of lowercase letters.
#
#
class Solution:
    def expressiveWords(self, s: str, words: List[str]) -> int:
        """Count words that can be stretched into *s*.

        A word is stretchy when its run-length groups line up with those of
        *s*: same characters in the same order, and each group count either
        equals the target count, or the target count is >= 3 and the word's
        count is no larger than it.
        """
        target = [(ch, len(list(grp))) for ch, grp in groupby(s)]

        def stretchy(word: str) -> bool:
            got = [(ch, len(list(grp))) for ch, grp in groupby(word)]
            if len(got) != len(target):
                return False
            for (t_ch, t_n), (w_ch, w_n) in zip(target, got):
                if t_ch != w_ch:
                    return False
                if w_n != t_n and (t_n < 3 or w_n > t_n):
                    return False
            return True

        return sum(stretchy(word) for word in words)
| [
"Huangln555@gmail.com"
] | Huangln555@gmail.com |
4c9e4283d802d77111f761068b39009db366dda8 | 23130cd12e38dbce8db8102810edaad70b240ae2 | /lintcode/798.1.py | 66348a1ab074920bacbe1db1eb7a63304721b742 | [
"MIT"
] | permissive | kangli-bionic/algorithm | ee6687c82101088db20f10fb958b4e45e97d3d31 | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | refs/heads/master | 2023-01-05T09:29:33.204253 | 2020-10-25T17:29:38 | 2020-10-25T17:29:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | py | """
798. Backpack VII
https://www.lintcode.com/problem/backpack-vii/description
Input: n = 8, prices = [3,2], weights = [300,160], amounts = [1,6]
Output: 640
Explanation: Buy the second rice(price = 2) use all 8 money.
n = 8 ->capacity
prices -> weight
weights -> value
amount:
processed_prices
processed_weight
m: type of goods
dp[i][j] = considering previous ith item, with it fill exactly j weight's max prices.
dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - processed_prices[i]] + processed_weight[i])
initil condition:
dp[i][0] = 0
answer:
max(dp[len(processed_prices)])
calculation direction: i 0 -> len(processed_prices), j 0 -> n
2D Sliding Array Optimization
"""
class Solution:
    """
    @param n: the money of you
    @param prices: the price of rice[i]
    @param weight: the weight of rice[i]
    @param amounts: the amount of rice[i]
    @return: the maximum weight
    """
    def backPackVII(self, n, prices, weight, amounts):
        # Bounded knapsack, solved as repeated 0/1 items with a 1-D DP:
        # dp[budget] = best weight achievable spending at most `budget`.
        # Iterating the budget downwards makes each copy of an item usable
        # at most once, which is exactly the original flatten-then-0/1 scheme.
        dp = [0] * (n + 1)
        for price, value, amount in zip(prices, weight, amounts):
            for _ in range(amount):
                for budget in range(n, price - 1, -1):
                    candidate = dp[budget - price] + value
                    if candidate > dp[budget]:
                        dp[budget] = candidate
        return max(dp)
| [
"hipaulshi@gmail.com"
] | hipaulshi@gmail.com |
d1000634f0376aae2fddf9af842c4fc073cf72ac | 609ba9de2e578ff39e77bf224a50487ad62665ae | /python/xrayspectrumanalyzer/ui/console/XrayLineReferenceManager.py | 8714cb9a3a2d093b36e74bd341995523da8881fe | [
"Apache-2.0"
] | permissive | drix00/xray_spectrum_analyzer | 2d016818abb000584e184fe792974d0a39390f30 | fec0aee90ec051f7f517b5c81bf7ec6972dec3c6 | refs/heads/master | 2021-01-11T10:49:15.051568 | 2020-04-30T00:23:49 | 2020-04-30T00:23:49 | 76,188,049 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,345 | py | #!/usr/bin/env python
"""
.. py:currentmodule:: console.XrayLineReferenceManager
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
X-ray lines reference manager
"""
###############################################################################
# Copyright 2016 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
# Third party modules.
# Local modules.
import xrayspectrumanalyzer.tools.XRayTransitionData as XRayTransitionData
import xrayspectrumanalyzer.tools.ElementProperties as ElementProperties
# Project modules
# Globals and constants variables.
FRACTION_MINOR_MAJOR = 0.05
class XrayLineReferenceManager(object):
    """Builds x-ray line/edge reference annotations for a set of elements."""

    def __init__(self):
        self._xrayData = XRayTransitionData.XRayTransitionData()
        self._xrayData.readFiles()
        self._elementSymbols = []

    def addElement(self, symbol):
        """Register one element (chemical symbol) for subsequent lookups."""
        self._elementSymbols.append(symbol)

    def getAbsorptionEdges(self):
        """Return (position_keV, label) for every subshell ionization edge."""
        edges = []
        for symbol in self._elementSymbols:
            z = ElementProperties.getAtomicNumberBySymbol(symbol)
            for shell in self._xrayData.getSubshell(z):
                energy_keV = self._xrayData.getIonizationEnergy_eV(z, shell) / 1.0e3
                edges.append((energy_keV, "%s %s" % (symbol, shell)))
        return edges

    def getMajorLines(self):
        """Return (position_keV, fraction, label) for strong transitions.

        Lines closer than 0.01 keV to an already accepted line of the same
        element are skipped (positions are recorded only for accepted lines).
        """
        major_lines = []
        for symbol in self._elementSymbols:
            accepted_keV = set()
            z = ElementProperties.getAtomicNumberBySymbol(symbol)
            for transition in self._xrayData.getTransition(z, restricted=False):
                energy_keV = self._xrayData.getTransitionEnergy_eV(z, transition) / 1.0e3
                fraction = self._xrayData.getTransitionFraction(z, transition)
                if (fraction > FRACTION_MINOR_MAJOR
                        and self.notSamePosition_keV(energy_keV, accepted_keV)):
                    major_lines.append((energy_keV, fraction, "%s %s" % (symbol, transition)))
                    accepted_keV.add(energy_keV)
        return major_lines

    def notSamePosition_keV(self, position_keV, positions_keV):
        """True when *position_keV* is at least 0.01 keV away from every reference."""
        return all(abs(position_keV - ref_keV) >= 0.01 for ref_keV in positions_keV)

    def getLines(self, peaks):
        """Resolve (symbol, transition) pairs to (position_keV, fraction, label).

        Pairs whose transition is unknown for that element are silently skipped.
        """
        lines = []
        for symbol, transition in peaks:
            z = ElementProperties.getAtomicNumberBySymbol(symbol)
            if transition in self._xrayData.getTransition(z, restricted=False):
                energy_keV = self._xrayData.getTransitionEnergy_eV(z, transition) / 1.0e3
                fraction = self._xrayData.getTransitionFraction(z, transition)
                lines.append((energy_keV, fraction, "%s %s" % (symbol, transition)))
        return lines

    def getMinorLines(self):
        """Return (position_keV, fraction, label) for weak transitions
        (fraction at or below FRACTION_MINOR_MAJOR)."""
        minor_lines = []
        for symbol in self._elementSymbols:
            z = ElementProperties.getAtomicNumberBySymbol(symbol)
            for transition in self._xrayData.getTransition(z, restricted=False):
                energy_keV = self._xrayData.getTransitionEnergy_eV(z, transition) / 1.0e3
                fraction = self._xrayData.getTransitionFraction(z, transition)
                if fraction <= FRACTION_MINOR_MAJOR:
                    minor_lines.append((energy_keV, fraction, "%s %s" % (symbol, transition)))
        return minor_lines

    def getSatelliteLines(self):
        """Return (position_keV, fraction, label) for satellite transitions
        (name starts with 'S' or contains 'satellite')."""
        satellite_lines = []
        for symbol in self._elementSymbols:
            z = ElementProperties.getAtomicNumberBySymbol(symbol)
            for transition in self._xrayData.getTransition(z, restricted=False):
                if transition.startswith('S') or 'satellite' in transition:
                    energy_keV = self._xrayData.getTransitionEnergy_eV(z, transition) / 1.0e3
                    fraction = self._xrayData.getTransitionFraction(z, transition)
                    satellite_lines.append((energy_keV, fraction, "%s %s" % (symbol, transition)))
        return satellite_lines

    def getSiEscapePeaks(self):
        """Return (position_keV, label) for Si-escape peaks of strong lines.

        Each line energy is shifted down by the Si Ka1 energy; only shifted
        positions above the Si Ka1 energy with a major fraction are kept.
        """
        si_ka_keV = self._xrayData.getTransitionEnergy_eV(14, 'Ka1') / 1.0e3
        escape_peaks = []
        for symbol in self._elementSymbols:
            z = ElementProperties.getAtomicNumberBySymbol(symbol)
            for transition in self._xrayData.getTransition(z, restricted=False):
                fraction = self._xrayData.getTransitionFraction(z, transition)
                shifted_keV = self._xrayData.getTransitionEnergy_eV(z, transition) / 1.0e3 - si_ka_keV
                if shifted_keV > si_ka_keV and fraction > FRACTION_MINOR_MAJOR:
                    escape_peaks.append((shifted_keV, "E %s %s - Si Ka" % (symbol, transition)))
        return escape_peaks
| [
"hendrix.demers@mail.mcgill.ca"
] | hendrix.demers@mail.mcgill.ca |
660b800c9c7be6eb7ec614be5dde3d4f787f725d | 2a318f4c8372c75224b2d79106ef52d8f4375e71 | /python/rangefill_clear.py | a3e3f0acf1250f30d650360c1680cdc1ca8e161f | [] | no_license | keyur32/graph-snippets | 0d4bacc66b5fb0bbfddb73695fa61a5538eaf038 | e416d3ad86abdb30449325c06758e8cc6d73c137 | refs/heads/master | 2021-01-23T05:29:59.155567 | 2017-06-01T02:11:23 | 2017-06-01T02:11:23 | 92,971,791 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | import http.client
conn = http.client.HTTPSConnection("graph.microsoft.com")
payload = "{ }"
headers = { 'content-type': "application/json" }
conn.request("POST", "/v1.0/me/drive/items/%7Bid%7D/workbook/names(%3Cname%3E)/range/format/fill/clear", payload, headers)
res = conn.getresponse()
data = res.read()
print(data.decode("utf-8"))
| [
"keyur32@hotmail.com"
] | keyur32@hotmail.com |
481998ada0c0ef78bbb4fa26730d9d10a3132ab9 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /work_early_eye_for_world/company/high_point/early_case_or_point/big_child_or_child.py | 92fde1477cfca10617fa6235b3dfd29ff53f3e8b | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py |
#! /usr/bin/env python
def part_or_fact(str_arg):
    """Echo *str_arg* to stdout."""
    print(str_arg)


def big_year_and_other_place(str_arg):
    """Print *str_arg* (via part_or_fact), then the fixed 'seem_point' marker."""
    part_or_fact(str_arg)
    print('seem_point')


if __name__ == '__main__':
    big_year_and_other_place('leave_man_on_few_day')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
58bcc20be0f3fa734329115202469d831c484347 | 87b7d7948aa51fdb4a27540240579788896369ea | /test/my_runs2/_sources/test 3_4e1e58686d7fae53f92701c4afae5f26.py | 41d0749e54f7f14992937b0f9a9ee928f0ba2dd9 | [] | no_license | Samuel-Levesque/Projet_GLO7030 | 6f13accd63b52107ec3e3a0b9b5f52edccda7c8d | 557bce3235f09723900f65c6e3b44a0ed9d2b519 | refs/heads/master | 2022-01-16T12:49:22.884798 | 2019-05-05T18:38:35 | 2019-05-05T18:38:35 | 177,038,991 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | from numpy.random import permutation
from sklearn import svm, datasets
from sacred import Experiment
from sacred.observers import FileStorageObserver,MongoObserver
ex = Experiment('iris_rbf_svm')
# Results are persisted with a local file observer only.
# Security fix: a previous revision kept a commented-out MongoObserver line
# here that embedded a live database URI with credentials; it has been
# removed — never commit secrets, even in commented-out code.
ex.observers.append(FileStorageObserver.create('my_runs2'))
@ex.config
def cfg():
    # Hyper-parameter grids picked up by sacred; the local variable names
    # ARE the config keys, so they must not be renamed.
    list_C = [1.4, 1.7]
    list_gamma = [0.4, 0.6]
def essai_model(C, gamma):
    """Build an (unfitted) RBF-kernel SVC with the given C and gamma."""
    return svm.SVC(C, 'rbf', gamma=gamma)
@ex.automain
def run(list_C, list_gamma):
    """Fit/evaluate an RBF SVM on a shuffled iris split for each (C, gamma).

    Trains on the first 90 shuffled samples, scores on the rest, logs each
    accuracy to sacred, and returns the score of the last pair.
    """
    iris = datasets.load_iris()
    order = permutation(iris.target.size)
    iris.data = iris.data[order]
    iris.target = iris.target[order]
    print("bonjour")
    for C, gamma in zip(list_C, list_gamma):
        print(C)
        print(gamma)
        clf = essai_model(C, gamma)
        clf.fit(iris.data[:90], iris.target[:90])
        print("epoch numero 3")
        score = clf.score(iris.data[90:], iris.target[90:])
        ex.log_scalar("acc", score)
        print(score)
    # NOTE(review): raises NameError if the grids are empty, as the
    # original did — preserved deliberately.
    return score
| [
"44324703+William-Bourget@users.noreply.github.com"
] | 44324703+William-Bourget@users.noreply.github.com |
cdd0c2844caaf787286b6166b7f777396e5fe250 | a06b1f68a43622c21b1dbdd8680f21d588a45219 | /setup.py | 156e23e300442138585f63c8fa6360e0ffc6b4d0 | [
"BSD-2-Clause"
] | permissive | i5misswrong/meshless | 6eac7e7ddbe51160ee37358ce36525b26b6c6843 | 27f9729050cedec2d7c1a716104d068608827c0f | refs/heads/master | 2021-01-15T22:51:33.502229 | 2017-07-05T13:59:17 | 2017-07-05T13:59:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,208 | py | from __future__ import absolute_import
import os
import inspect
import subprocess
from setuptools import setup, find_packages
def git_version():
    """Return the current git HEAD sha, or "Unknown" if git cannot be run."""
    def _run_git(cmd):
        # Minimal, locale-pinned environment so git output is predictable.
        env = {k: os.environ[k] for k in ('SYSTEMROOT', 'PATH') if os.environ.get(k) is not None}
        env['LANGUAGE'] = 'C'  # LANGUAGE is used on win32
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        return subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]

    try:
        return _run_git(['git', 'rev-parse', 'HEAD']).strip().decode('ascii')
    except OSError:
        return "Unknown"
def get_version_info(version, is_released):
    """Return the full version string; dev builds get a '.dev0+<sha7>' suffix."""
    if is_released:
        return version
    return version + '.dev0+' + git_version()[:7]
def write_version_py(version, is_released, filename='./meshless/version.py'):
    """Write ``__version__ = "<fullversion>"`` into *filename* and return it.

    Bug fix: the *filename* parameter used to be silently ignored (a
    hard-coded './meshless/version.py' was always written) and its default
    even pointed at a stale 'compmech/version.py' path.  The parameter is
    now honored; the new default preserves the previous effective behavior,
    so existing callers are unaffected.
    """
    fullversion = get_version_info(version, is_released)
    with open(filename, "wb") as f:
        f.write(b'__version__ = "%s"\n' % fullversion.encode())
    return fullversion
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory.

    Bug fix: the original left the file handle open; a ``with`` block now
    guarantees it is closed.  The returned text is unchanged.
    """
    setupdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    with open(os.path.join(setupdir, fname)) as fh:
        return fh.read()
#_____________________________________________________________________________
# ---- install metadata -------------------------------------------------
# Runtime/install dependencies; pyNastran is dropped on Travis CI builds.
install_requires = [
    "numpy",
    "scipy",
    "coveralls",
    "pyNastran",
    "setuptools-git-version",
]
if os.environ.get('TRAVIS') == 'true':
    install_requires.remove("pyNastran")

CLASSIFIERS = """\
Development Status :: 3 - Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
Intended Audience :: Education
Topic :: Scientific/Engineering :: Mathematics
License :: OSI Approved :: BSD License
Operating System :: Microsoft :: Windows
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3.5
Operating System :: Unix
"""

is_released = True
version = '0.1.19'
# Side effect: writes meshless/version.py with the resolved version string.
fullversion = write_version_py(version, is_released)

data_files = [('', [
    'README.md',
    'LICENSE',
    'meshless/version.py',
])]
package_data = {
    '': ['tests/*.*'],
}

s = setup(
    name="meshless",
    version=fullversion,
    author="Saullo G. P. Castro",
    author_email="castrosaullo@gmail.com",
    description=("Meshless Methods for Computational Mechanics"),
    license="BSD",
    keywords="es-pim finite element partial diferential equations",
    url="https://github.com/compmech/meshless",
    packages=find_packages(),
    package_data=package_data,
    data_files=data_files,
    long_description=read('README.md'),
    classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
    install_requires=install_requires,
)
| [
"saullogiovani@gmail.com"
] | saullogiovani@gmail.com |
63e03d22c64135796dcbbadcdebb839599a309af | 9a46784244d544445c01c6f0d564f4da65efcfaf | /CodeUltimateFlaskCourse/13. Flask-Login/login_create_form/app.py | 7783a369710f1649665c1d06e1b5379ed4d7a82b | [] | no_license | ammbyrne/Flask | f55a606ec234c6a00b4d264a48e11b2f487d4ef7 | 7922ab46b8a4c388346043d2393173e7e49e43bb | refs/heads/main | 2023-04-19T16:07:08.224824 | 2021-05-07T03:21:44 | 2021-05-07T03:21:44 | 365,101,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | from flask import Flask, render_template, request
from flask_login import LoginManager, UserMixin, login_user, login_required, current_user, logout_user
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# NOTE(review): secret key and an absolute sqlite path are hard-coded here;
# values kept byte-identical to the original.
app.config.update(
    SECRET_KEY='Thisisasecret!',
    SQLALCHEMY_DATABASE_URI='sqlite:////mnt/c/Users/antho/Documents/login_example/login.db',
)
login_manager = LoginManager(app)
db = SQLAlchemy(app)
class User(UserMixin, db.Model):
    """Minimal login user: integer primary key plus a unique username."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(30), unique=True)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve the session's user id back to a User row."""
    return User.query.get(int(user_id))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form; on POST, look the user up and log them in."""
    if request.method != 'POST':
        return render_template('login.html')
    username = request.form['username']
    user = User.query.filter_by(username=username).first()
    if not user:
        return '<h1>User does not exist!</h1>'
    login_user(user)
    return '<h1>You are now logged in!</h1>'
@app.route('/home')
@login_required
def home():
    """Protected page greeting the logged-in user by name."""
    return f'<h1>You are in the protected area, {current_user.username}!</h1>'
@app.route('/logout')
@login_required
def logout():
    """End the current session and confirm."""
    logout_user()
    return '<h1>You are now logged out!</h1>'
if __name__ == '__main__':
    # Development server only — do not run with debug=True in production.
    app.run(debug=True)
"andy_m_byrne@yahoo.co.uk"
] | andy_m_byrne@yahoo.co.uk |
30834878669b80bbcfb9f89279b8d3a82103d9e4 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/resources/v20191001/get_deployment_at_subscription_scope.py | 2aa46153ad6ca3f33ca2ff7ee70b5c461ebd56ce | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,139 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetDeploymentAtSubscriptionScopeResult',
'AwaitableGetDeploymentAtSubscriptionScopeResult',
'get_deployment_at_subscription_scope',
]
@pulumi.output_type
class GetDeploymentAtSubscriptionScopeResult:
    """
    Deployment information.
    """
    def __init__(__self__, id=None, location=None, name=None, properties=None, tags=None, type=None):
        # Type-check each argument (only when truthy, as the generator does)
        # and stash it on the output object via pulumi.set, in order.
        for attr, value, expected in (
                ("id", id, str),
                ("location", location, str),
                ("name", name, str),
                ("properties", properties, dict),
                ("tags", tags, dict),
                ("type", type, str)):
            if value and not isinstance(value, expected):
                raise TypeError("Expected argument '%s' to be a %s" % (attr, expected.__name__))
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def id(self) -> str:
        """The ID of the deployment."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """the location of the deployment."""
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the deployment."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.DeploymentPropertiesExtendedResponse':
        """Deployment properties."""
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """Deployment tags"""
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """The type of the deployment."""
        return pulumi.get(self, "type")
class AwaitableGetDeploymentAtSubscriptionScopeResult(GetDeploymentAtSubscriptionScopeResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` turns this into a generator so the result can be
        # awaited; awaiting simply yields a plain copy of the result.
        if False:
            yield self
        return GetDeploymentAtSubscriptionScopeResult(
            id=self.id,
            location=self.location,
            name=self.name,
            properties=self.properties,
            tags=self.tags,
            type=self.type)
def get_deployment_at_subscription_scope(deployment_name: Optional[str] = None,
                                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDeploymentAtSubscriptionScopeResult:
    """
    Deployment information.

    :param str deployment_name: The name of the deployment.
    """
    invoke_args = {'deploymentName': deployment_name}
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke(
        'azure-native:resources/v20191001:getDeploymentAtSubscriptionScope',
        invoke_args, opts=opts, typ=GetDeploymentAtSubscriptionScopeResult).value
    return AwaitableGetDeploymentAtSubscriptionScopeResult(
        id=result.id,
        location=result.location,
        name=result.name,
        properties=result.properties,
        tags=result.tags,
        type=result.type)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
69596b8782b06a6760a81282c05936b8831b8d23 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/pose_estimation/MSPN/exps/mspn.2xstg.coco/train.py | d5dce9ff26b721b013de30309d9d4fd092d0f95f | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 5,856 | py | # encoding: utf-8
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
@author: Wenbo Li
@contact: fenglinglwb@gmail.com
"""
import argparse
import time
import torch
from tensorboardX import SummaryWriter
from cvpack.torch_modeling.engine.engine import Engine
from cvpack.utils.pyt_utils import ensure_dir
from config import cfg
from network import MSPN
from lib.utils.dataloader import get_train_loader
from lib.utils.solver import make_lr_scheduler, make_optimizer
############## apex modify 2 begin #################
from apex import amp
import os
PERF_MODE = os.getenv('PERF_MODE', False)
############## apex modift 2 end #################
def main():
    """Train MSPN with mixed precision (apex O1), logging to tensorboard.

    Iteration budgets are scaled from the 8-GPU defaults to the actual GPU
    count; PERF_MODE caps the run at 100 iterations for benchmarking.
    """
    parser = argparse.ArgumentParser()

    with Engine(cfg, custom_parser=parser) as engine:
        logger = engine.setup_log(
            name='train', log_dir=cfg.OUTPUT_DIR, file_name='log.txt')
        args = parser.parse_args()
        ensure_dir(cfg.OUTPUT_DIR)

        model = MSPN(cfg, run_efficient=cfg.RUN_EFFICIENT)
        device = torch.device(cfg.MODEL.DEVICE)
        model.to(device)

        num_gpu = len(engine.devices)
        # Defaults assume 8 GPUs — rescale the schedules accordingly.
        cfg.SOLVER.CHECKPOINT_PERIOD = int(cfg.SOLVER.CHECKPOINT_PERIOD * 8 / num_gpu)
        cfg.SOLVER.MAX_ITER = int(cfg.SOLVER.MAX_ITER * 8 / num_gpu)

        optimizer = make_optimizer(cfg, model, num_gpu)
        scheduler = make_lr_scheduler(cfg, optimizer)

        # Mixed-precision setup (apex O1, fixed loss scale).
        model, optimizer = amp.initialize(
            model, optimizer, opt_level="O1", loss_scale=128.0, combine_grad=True)

        engine.register_state(scheduler=scheduler, model=model, optimizer=optimizer)

        if engine.distributed:
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.local_rank],
                broadcast_buffers=False, )

        # Resume takes precedence over a fresh pretrained weight load.
        if engine.continue_state_object:
            engine.restore_checkpoint(is_restore=False)
        else:
            if cfg.MODEL.WEIGHT:
                engine.load_checkpoint(cfg.MODEL.WEIGHT, is_restore=False)

        data_loader = get_train_loader(cfg, num_gpu=num_gpu, is_dist=engine.distributed)

        # ------------ do training ---------------------------- #
        logger.info("\n\nStart training with pytorch version {}".format(torch.__version__))

        max_iter = len(data_loader)
        if PERF_MODE:
            max_iter = 100
        checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
        tb_writer = SummaryWriter(cfg.TENSORBOARD_DIR)

        model.train()
        t_prev = time.time()
        for iteration, (images, valids, labels) in enumerate(
                data_loader, engine.state.iteration):
            iteration = iteration + 1
            images = images.to(device)
            valids = valids.to(device)
            labels = labels.to(device)

            loss_dict = model(images, valids, labels)
            losses = sum(loss for loss in loss_dict.values())

            optimizer.zero_grad()
            with amp.scale_loss(losses, optimizer) as scaled_loss:
                scaled_loss.backward()
            optimizer.step()

            if cfg.RUN_EFFICIENT:
                # NOTE(review): `images` is deleted here but read again below
                # for the FPS log — preserved from the original; verify.
                del images, valids, labels, losses

            if engine.local_rank == 0:
                if iteration % 20 == 0 or iteration == max_iter:
                    log_str = 'Iter:%d, LR:%.1e, ' % (
                        iteration, optimizer.param_groups[0]["lr"] / num_gpu)
                    for key in loss_dict:
                        tb_writer.add_scalar(
                            key, loss_dict[key].mean(), global_step=iteration)
                        log_str += key + ': %.3f, ' % float(loss_dict[key])

                    t_now = time.time()
                    elapsed_time = t_now - t_prev
                    t_prev = t_now
                    FPS = images.shape[0] / elapsed_time * num_gpu * 20
                    log_str += 'FPS: %.3f,' % float(FPS)

                    required_time = elapsed_time / 20 * (max_iter - iteration)
                    hours = required_time // 3600
                    mins = required_time % 3600 // 60
                    log_str += 'To Finish: %dh%dmin,' % (hours, mins)
                    logger.info(log_str)

            scheduler.step()

            if iteration % checkpoint_period == 0 or iteration == max_iter:
                engine.update_iteration(iteration)
                if engine.distributed and (engine.local_rank == 0):
                    engine.save_and_link_checkpoint(cfg.OUTPUT_DIR)
                elif not engine.distributed:
                    engine.save_and_link_checkpoint(cfg.OUTPUT_DIR)

            if iteration >= max_iter:
                logger.info('Finish training process!')
                break


if __name__ == "__main__":
    main()
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
52ca0d7034a10d2e5064f8a4df166a7f6b942a3b | 551be15fdad2e1c5cbb6158ce20e165eaadd87bd | /corc/providers/dummy.py | cb53e3fedaf0de4f02e6ff70fee63a9e2199d539 | [
"MIT"
] | permissive | frankfanslc/corc | b230d0f14ca80d5c175a048921a75ed4a0eae0f5 | 2d2ba92ab791f50fa46e1ff2cdc0035925032671 | refs/heads/master | 2023-08-21T22:46:05.987006 | 2021-02-28T16:12:37 | 2021-02-28T16:12:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | import uuid
from corc.config import default_config_path
from corc.orchestrator import Orchestrator
from corc.util import ping
class LocalOrchestrator(Orchestrator):
    """Dummy orchestrator backed by the local machine (127.0.0.1)."""

    def __init__(self, options):
        super().__init__(options)
        self.instance = None
        self.resource_id = None

    def endpoint(self, select=None):
        # The "resource" is always the local host.
        return "127.0.0.1"

    def poll(self):
        """Mark the orchestrator reachable when the endpoint answers a ping."""
        target = self.endpoint()
        if target and ping(target):
            self._is_reachable = True

    def setup(self, resource_config=None):
        """Local resources need no provisioning; just mint an id once."""
        if not self.instance:
            self.instance = True
        if not self.resource_id:
            self.resource_id = str(uuid.uuid4())
        self._is_ready = True

    def get_resource(self):
        """Return (resource_id, instance) — both None until setup() runs."""
        return self.resource_id, self.instance

    def tear_down(self):
        self._is_ready = False

    @classmethod
    def load_config_options(cls, provider="", path=default_config_path):
        return {}

    @classmethod
    def make_resource_config(cls, **kwargs):
        return {}

    @classmethod
    def validate_options(cls, options):
        if not isinstance(options, dict):
            raise TypeError("options is not a dictionary")
| [
"munk1@live.dk"
] | munk1@live.dk |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.