blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e6b58e96fe19f7fa179d9b9171e094bdc16c019b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04012/s970832368.py | c7840201312991083417648a32c75345c801a6aa | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | s = input()
# The string qualifies ("Yes") only when every lowercase letter a-z
# occurs an even number of times in s.
flag = True
for code in range(97, 123):
    if s.count(chr(code)) % 2 == 1:
        flag = False
if flag:
print("Yes")
else:
print("No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
94b8ca4b415bdc70d33447a12a4fcf48e709706b | cbcf71723d9e2ddee84ebc17bdfafe6eadd8aa73 | /backup/courses/migrations/0002_auto_20180330_2003.py | 1307dcd3b5898c9790908e19defb5f07b651bedf | [] | no_license | tanveerahmad1517/coursewebsit | 641ea451597ee910cd4c7399d0069b9c1f195ef6 | f518252cd90aeca188cc9a75c8e554ffcf395254 | refs/heads/master | 2020-03-10T00:51:18.024567 | 2018-11-07T14:47:45 | 2018-11-07T14:47:45 | 129,093,030 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | # Generated by Django 2.0 on 2018-03-30 15:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Repoint Course.user at the configured auth user model.

    Adds a reverse 'courses' accessor on the user and cascade-deletes a
    user's courses when the user row is removed.
    """
    dependencies = [
        ('courses', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='course',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='courses', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"tanveerobjects@gmail.com"
] | tanveerobjects@gmail.com |
bca551e720e8bf05673e6ec94d184ff22c6360db | f677ec7dafd40b4d1e9316275372f80a2d2a473e | /lampost/mud/action.py | 9a236ed6094e4d93274946a428f2d4d611acf9e6 | [
"MIT"
] | permissive | cookiezeater/Lampost-Mud | f5ac08e4bda5e4cdfa53f8147c5975fd23137c60 | 6f6adf0bdf5c6334484b9dc23e9e3d235e5df155 | refs/heads/master | 2021-01-15T23:06:30.678062 | 2015-09-04T21:19:42 | 2015-09-04T21:19:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py | from collections import defaultdict
from lampost.context import resource
from lampost.gameops.action import make_action, convert_verbs, ActionError
resource.m_requires(__name__, 'log')
# Global registry mapping each verb tuple to its action handler; published
# to the rest of the MUD through the resource container as 'mud_actions'.
_mud_actions = {}
resource.register('mud_actions', _mud_actions)
# Functions registered through the imm_action decorator below.
imm_actions = set()
def mud_action(verbs, msg_class=None, **kwargs):
    """Decorator factory registering a player-facing action under *verbs*.

    Each verb produced by convert_verbs is pointed at the action built by
    make_action; a clash with an already-registered verb is logged as an
    error instead of overwriting the existing action.  (error() is
    presumably injected into this module by resource.m_requires above.)

    NOTE(review): dec_wrapper returns None, so the decorated name is
    rebound to None at module level (unlike imm_action, which returns the
    action).  Dispatch still works because it goes through _mud_actions --
    confirm this is intentional.
    """
    def dec_wrapper(func):
        action = make_action(func, msg_class=msg_class, **kwargs)
        for verb in convert_verbs(verbs):
            if verb in _mud_actions:
                error("Adding mud action for existing verb {}", verb)
            else:
                _mud_actions[verb] = action
    return dec_wrapper
def imm_action(verbs, msg_class=None, imm_level='builder', **kwargs):
    """Decorator factory for actions gated by an immortal level.

    Tracks the function in the module-level imm_actions set, tags it with
    the required level (defaults to 'builder'), and returns the action
    built by make_action.
    """
    def decorate(func):
        imm_actions.add(func)
        func.imm_level = imm_level
        return make_action(func, verbs, msg_class, **kwargs)
    return decorate
@mud_action('help')
def help_action(source, args, **_):
    """List every available verb, or show help text for one command."""
    if not args:
        # No topic given: list all registered verbs, grouping synonyms of
        # the same action with '/'.
        source.display_line('Available actions:')
        verbs_by_action = defaultdict(list)
        for verb, action in _mud_actions.items():
            verbs_by_action[action].append(" ".join(verb))
        grouped = ["/".join(verb_names) for verb_names in verbs_by_action.values()]
        return source.display_line(", ".join(sorted(grouped)))
    action = _mud_actions.get(args, None)
    if not action:
        raise ActionError("No matching command found")
    return getattr(action, "help_text", "No help available.")
| [
"genzgd@gmail.com"
] | genzgd@gmail.com |
0865b23f84b6c6bbcd0c3dfd225ae579f6bbcd40 | 26a432237152b92c0547f5c63a1e035920fd7582 | /blog/models.py | 35d0406b8ee7dfcbe2370c0ef2694e9f4596b996 | [] | no_license | kijames7/DjangoBlog | c1164eedaf7de7cb2fcc3c06a89a141d6a9bde39 | d6525566f331d27b03c9395aa669032cc408f7de | refs/heads/master | 2021-01-19T19:44:36.225008 | 2016-04-01T07:53:24 | 2016-04-01T07:53:24 | 55,214,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | from django.db import models
# Create your models here.
class Author(models.Model):
    """A blog author with a unique contact email and free-form bio."""
    name = models.CharField(max_length=50)
    email = models.EmailField(unique=True)  # unique natural key
    bio = models.TextField()
    def __str__(self):
        return self.name
class Category(models.Model):
    """Editorial category a post can be filed under."""
    cat_name = models.CharField('category name',max_length=50)
    cat_description = models.CharField('category description',max_length=255)
    #fix plural for category
    class Meta:
        # Stops the admin from rendering the default plural "Categorys".
        verbose_name_plural = 'Categories'
    def __str__(self):
        return self.cat_name
class Tag(models.Model):
    """Free-form label attachable to any number of posts."""
    tag_name = models.CharField(max_length=50)
    tag_description = models.CharField(max_length=255)
    def __str__(self):
        return self.tag_name
class Post(models.Model):
    """A blog entry tied to one author and any number of categories/tags."""
    title = models.CharField(max_length=200)
    body = models.TextField()
    created_date = models.DateTimeField(auto_now_add=True, auto_now=False)  # set once on insert
    updated_date = models.DateTimeField(auto_now_add=False, auto_now=True)  # refreshed on every save
    author = models.ForeignKey(Author)
    categories = models.ManyToManyField(Category)
    tags = models.ManyToManyField(Tag)
def __str__(self):
return self.title | [
"james@Jamess-MacBook-Pro.local"
] | james@Jamess-MacBook-Pro.local |
1c228b89a3c6afe2a800d1307ceca5feab9e6c47 | 3a8c2bd3b8df9054ed0c26f48616209859faa719 | /Challenges/memorizePhoneNumber.py | a236e27e9f8438c0b6733f6fba0a318e1ca8380e | [] | no_license | AusCommsteam/Algorithm-and-Data-Structures-and-Coding-Challenges | 684f1ca2f9ee3c49d0b17ecb1e80707efe305c82 | 98fb752c574a6ec5961a274e41a44275b56da194 | refs/heads/master | 2023-09-01T23:58:15.514231 | 2021-09-10T12:42:03 | 2021-09-10T12:42:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,208 | py | """
You are given a phone number as an array of n digits. To help you memorize the number, you want to divide it into groups of contiguous digits. Each group must contain exactly 2 or 3 digits. There are 3 kinds of groups:
Excellent: A group that contains only the same digits. For example, 000 or 77.
Good: A group of 3 digits, 2 of which are the same. For example, 030, 229 or 166.
Usual: A group in which all the digits are distinct. For example, 123 or 90.
The quality of a group assignment is defined as 2 × (number of excellent groups) + (number of good groups). Divide the phone number into groups such that the quality is maximized.
"""
def getQuality(nums, startIndex, groupLength):
    """Score one contiguous group of digits from *nums*.

    Returns 2 for an "excellent" group (all digits equal), 1 for a "good"
    3-digit group (exactly one repeated pair), and 0 otherwise.  Raises
    ValueError when the group runs past the end of *nums* or when
    groupLength is not 2 or 3.
    """
    if startIndex + groupLength > len(nums):
        raise ValueError('startIndex + groupLength > len(nums)')
    if groupLength == 2:
        return 2 if nums[startIndex] == nums[startIndex + 1] else 0
    if groupLength == 3:
        a = nums[startIndex]
        b = nums[startIndex + 1]
        c = nums[startIndex + 2]
        if a == b == c:
            return 2
        if a == b or a == c or b == c:
            return 1
        return 0
    raise ValueError('groupLength must equal 2 or 3')
def memorize(phoneNumber):
    """Split *phoneNumber* into groups of 2 or 3 digits maximizing quality.

    Quality is 2 * (excellent groups) + (good groups) as scored by
    getQuality.  Returns (best_quality, groups) where groups is a list of
    digit lists.  Ties between a trailing 2-digit and 3-digit group go to
    the 3-digit split (matching the strict '>' comparison).  Numbers
    shorter than 3 digits are returned as a single group.
    """
    n = len(phoneNumber)
    if n < 3:
        return getQuality(phoneNumber, 0, n), [[digit for digit in phoneNumber]]
    # best[i] / grouping[i]: optimal quality and split of the prefix ending
    # at digit i (inclusive).
    best = [0 for _ in phoneNumber]
    grouping = [None for _ in phoneNumber]
    grouping[0] = [[phoneNumber[0]]]
    best[1] = getQuality(phoneNumber, 0, 2)
    grouping[1] = [[phoneNumber[k] for k in range(2)]]
    best[2] = getQuality(phoneNumber, 0, 3)
    grouping[2] = [[phoneNumber[k] for k in range(3)]]
    for i in range(3, n):
        pair_total = best[i - 2] + getQuality(phoneNumber, i - 1, 2)
        triple_total = best[i - 3] + getQuality(phoneNumber, i - 2, 3)
        if pair_total > triple_total:
            best[i] = pair_total
            grouping[i] = grouping[i - 2] + [[phoneNumber[k] for k in range(i - 1, i + 1)]]
        else:
            best[i] = triple_total
            grouping[i] = grouping[i - 3] + [[phoneNumber[k] for k in range(i - 2, i + 1)]]
    return best[n - 1], grouping[n - 1]
# https://leetcode.com/discuss/interview-question/363871/google-memorize-phone-number
from functools import lru_cache
def memorizePhoneNumber(s):
    # Returns the best grouping of s into 2- and 3-digit chunks, formatted
    # like '(12)(33)(445)(556)'.
    n = len(s)
    # score(t) = len(t) - len(set(t)): 0 for all-distinct digits, larger
    # the more repeats a chunk contains.
    # NOTE(review): under this metric a 2-digit excellent group scores 1,
    # while getQuality above scores it 2 -- the two solvers in this file
    # disagree on the quality definition; confirm which is intended.
    score = lambda t: len(t) - len(set(t))
    # helper computes the largest score and the splitting indices of s[i:]
    @lru_cache(n)
    def helper(i):
        # Base case: past the end contributes 0; bounds always end with n.
        val, bounds = (0, (n,)) if i == n else (-float('inf'), (n,))
        for step in [2, 3]:
            if i + step <= n:
                (v, b), r = helper(i + step), score(s[i:i + step])
                # '>=' with step order [2, 3] means 3-digit chunks win ties.
                if r + v >= val:
                    val, bounds = r + v, (i,) + b
        return (val, bounds)
    val, bounds = helper(0)
    # Adjacent boundary indices delimit the chosen chunks.
    return ''.join([f'({s[i:j]})' for i, j in zip(bounds, bounds[1:])])
# Quick demo: print the hand-expected grouping next to the solver's answer.
s = '1233445556'
print(s, '(12)(33)(445)(556)')
print(memorizePhoneNumber(s))
s = '12334455566'
print(s, '(12)(33)(44)(555)(66)')
print(memorizePhoneNumber(s))
if __name__ == '__main__':
    # Exercise the DP solver on list and string inputs, including the
    # short (< 3 digit) fallback path.
    print(memorize([1,2,3,4,5,6,7,8]))
    print(memorize([3,3,3,0,0,2,1]))
    print(memorize('1233445556'))
    print(memorize('12334455566'))
    print(memorize('12'))
    print(memorize('122'))
| [
"bennyhwanggggg@users.noreply.github.com"
] | bennyhwanggggg@users.noreply.github.com |
3dc803c89b593500cc45937f31676e0ed30e8c86 | 9b113d2f759432c147ba880f04ff515c6739d2ac | /thunder/accounts/migrations/0002_user_is_staff.py | 7bfaae1095ff2d5e5ac50a7f1c9ca31afe459922 | [
"Apache-2.0"
] | permissive | liuansen/django-demo | eec88fc4b31ff960463b29746988ae12c9ef2bdd | 7f13e723a7ec1f0689e40de67cc6a6b308f44ba9 | refs/heads/master | 2022-11-26T17:38:09.650913 | 2019-06-17T07:39:27 | 2019-06-17T07:39:27 | 192,091,184 | 1 | 0 | Apache-2.0 | 2022-11-22T00:30:48 | 2019-06-15T14:59:30 | Python | UTF-8 | Python | false | false | 561 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2019-01-07 07:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add User.is_staff (default False), which controls admin-site login."""
    dependencies = [
        ('accounts', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='is_staff',
            field=models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='\u662f\u5426\u662f\u804c\u5458'),
        ),
    ]
| [
"616833686@qq.com"
] | 616833686@qq.com |
7f68a729966f3d085ba517c4e657499511e876b9 | 9c016161968de87315f1d5c7b7762194bd5e577b | /uproot_methods/classes/TVector2.py | 75ec5dc7510ca431af9a5bde7c64c5da194470c1 | [
"BSD-3-Clause"
] | permissive | kratsg/uproot-methods | ee285f899f743c4c214998b34ecc6b0f12d31e4e | ee773ce3d5e6aef401dc42d282e6296c3b6a42d0 | refs/heads/master | 2020-03-29T04:08:37.130974 | 2018-09-19T22:14:14 | 2018-09-19T22:14:14 | 149,517,357 | 0 | 0 | BSD-3-Clause | 2018-09-19T22:04:44 | 2018-09-19T22:04:43 | null | UTF-8 | Python | false | false | 6,724 | py | #!/usr/bin/env python
# Copyright (c) 2018, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numbers
import awkward
import awkward.util
import uproot_methods.common.TVector
import uproot_methods.base
class Common(object):
    """Behavior shared by the scalar and array TVector2 implementations."""

    def dot(self, other):
        """Return the 2-D scalar (dot) product of *self* and *other*."""
        return self.x * other.x + self.y * other.y

    # TODO:
    # def _rotate(self, angle)
class ArrayMethods(Common, uproot_methods.common.TVector.ArrayMethods, uproot_methods.base.ROOTMethods):
    """Vectorized TVector2 behavior layered over an awkward ObjectArray."""
    def _initObjectArray(self, table):
        # Each table row materializes lazily as a scalar TVector2.
        awkward.ObjectArray.__init__(self, table, lambda row: TVector2(row["fX"], row["fY"]))
        self.content.rowname = "TVector2"
    @property
    def x(self):
        return self["fX"]
    @property
    def y(self):
        return self["fY"]
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # Only plain element-wise calls are intercepted; other ufunc methods
        # (reduce, accumulate, ...) are declined.
        if method != "__call__":
            return NotImplemented
        inputs = list(inputs)
        for i in range(len(inputs)):
            # Collapse an object-dtype array whose entries are all the same
            # object into that single object (detected by comparing the raw
            # pointer values exposed via frombuffer as uintp).
            if isinstance(inputs[i], awkward.util.numpy.ndarray) and inputs[i].dtype == awkward.util.numpy.dtype(object) and len(inputs[i]) > 0:
                idarray = awkward.util.numpy.frombuffer(inputs[i], dtype=awkward.util.numpy.uintp)
                if (idarray == idarray[0]).all():
                    inputs[i] = inputs[i][0]
        if ufunc is awkward.util.numpy.add or ufunc is awkward.util.numpy.subtract:
            # Vector addition/subtraction: component-wise, and only defined
            # between (arrays of) TVector2 values.
            if not all(isinstance(x, (ArrayMethods, Methods)) for x in inputs):
                raise TypeError("(arrays of) TVector2 can only be added to/subtracted from other (arrays of) TVector2")
            out = self.empty_like()
            out["fX"] = getattr(ufunc, method)(*[x.x for x in inputs], **kwargs)
            out["fY"] = getattr(ufunc, method)(*[x.y for x in inputs], **kwargs)
            return out
        elif ufunc is awkward.util.numpy.power and len(inputs) >= 2 and isinstance(inputs[1], (numbers.Number, awkward.util.numpy.number)):
            # |v|**p via mag2: exact for p == 2, otherwise (mag2)**(p/2).
            if inputs[1] == 2:
                return self.mag2()
            else:
                return self.mag2()**(0.5*inputs[1])
        elif ufunc is awkward.util.numpy.absolute:
            # abs(v) is the vector magnitude.
            return self.mag()
        else:
            return awkward.ObjectArray.__array_ufunc__(self, ufunc, method, *inputs, **kwargs)
class Methods(Common, uproot_methods.common.TVector.Methods, uproot_methods.base.ROOTMethods):
    """Scalar TVector2 behavior: representation, equality and the operator
    helpers used by the arithmetic mix-ins."""

    _arraymethods = ArrayMethods

    @property
    def x(self):
        """Horizontal component (backed by _fX)."""
        return self._fX

    @property
    def y(self):
        """Vertical component (backed by _fY)."""
        return self._fY

    def __repr__(self):
        return "TVector2({0:.5g}, {1:.5g})".format(self.x, self.y)

    def __str__(self):
        return repr(self)

    def __eq__(self, other):
        if not isinstance(other, Methods):
            return False
        return self.x == other.x and self.y == other.y

    def _scalar(self, operator, scalar, reverse=False):
        """Combine this vector with a plain number, component by component."""
        if not isinstance(scalar, (numbers.Number, awkward.util.numpy.number)):
            raise TypeError("cannot {0} a TVector2 with a {1}".format(operator.__name__, type(scalar).__name__))
        if reverse:
            operands = ((scalar, self.x), (scalar, self.y))
        else:
            operands = ((self.x, scalar), (self.y, scalar))
        return TVector2(*(operator(a, b) for a, b in operands))

    def _vector(self, operator, vector, reverse=False):
        """Combine two vectors component-wise into a new TVector2."""
        if not isinstance(vector, Methods):
            raise TypeError("cannot {0} a TVector2 with a {1}".format(operator.__name__, type(vector).__name__))
        if reverse:
            operands = ((vector.x, self.x), (vector.y, self.y))
        else:
            operands = ((self.x, vector.x), (self.y, vector.y))
        return TVector2(*(operator(a, b) for a, b in operands))

    def _unary(self, operator):
        """Apply a unary operator to each component."""
        return TVector2(operator(self.x), operator(self.y))
class TVector2Array(ArrayMethods, awkward.ObjectArray):
    """Array of 2-D vectors stored column-wise ("fX", "fY") in an awkward Table."""
    def __init__(self, x, y):
        self._initObjectArray(awkward.Table())
        self["fX"] = x
        self["fY"] = y
    @classmethod
    def origin(cls, shape, dtype=None):
        """Array of zero vectors with the given shape (float64 by default)."""
        if dtype is None:
            dtype = awkward.util.numpy.float64
        return cls(awkward.util.numpy.zeros(shape, dtype=dtype), awkward.util.numpy.zeros(shape, dtype=dtype))
    @classmethod
    def origin_like(cls, array):
        """Array of zero vectors matching *array*'s shape and dtype."""
        return cls.origin(array.shape, array.dtype)
    @classmethod
    def from_circular(cls, rho, phi):
        """Build from polar coordinates: radius *rho*, azimuth *phi* (radians)."""
        return cls(rho * awkward.util.numpy.cos(phi),
                   rho * awkward.util.numpy.sin(phi))
    @property
    def x(self):
        return self["fX"]
    @x.setter
    def x(self, value):
        self["fX"] = value
    @property
    def y(self):
        return self["fY"]
    @y.setter
    def y(self, value):
        self["fY"] = value
class TVector2(Methods):
    """Concrete scalar 2-D vector holding plain numeric components."""
    def __init__(self, x, y):
        self._fX = x
        self._fY = y
    @classmethod
    def origin(cls):
        """The vector (0.0, 0.0)."""
        return cls(0.0, 0.0)
    @classmethod
    def from_circular(cls, rho, phi):
        """Build from polar coordinates: radius *rho*, azimuth *phi* (radians)."""
        return cls(rho * math.cos(phi),
                   rho * math.sin(phi))
    @property
    def x(self):
        return self._fX
    @x.setter
    def x(self, value):
        self._fX = value
    @property
    def y(self):
        return self._fY
    @y.setter
    def y(self, value):
        self._fY = value
| [
"jpivarski@gmail.com"
] | jpivarski@gmail.com |
48693be5723d40ee6ade1a3e36e0bc456d8743aa | 7507968c068ac84321f528d475b213fe3699c479 | /app/scripts/set-jupyter-password.py | b28787f3425b70f98cd6af5699cce52d64daf30e | [
"Apache-2.0"
] | permissive | slifty/esper | 4f734b453db1cc9d8b9c9df815447efc099065af | 12e9c43a4bc11547073c04583cc4163409d127f9 | refs/heads/master | 2021-08-29T23:10:50.581417 | 2017-12-15T07:39:15 | 2017-12-15T07:39:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | import pexpect
import os
# Bail out silently when no password was provided via the environment.
if 'JUPYTER_PASSWORD' not in os.environ:
    exit()
# Jupyter keeps its hashed password under this config directory.
JUPYTER_DIR = '/root/.jupyter'
if not os.path.isdir(JUPYTER_DIR):
    os.mkdir(JUPYTER_DIR)
# Drive the interactive `jupyter notebook password` prompt: the same value
# is sent twice, answering both the entry and the verification prompt.
p = pexpect.spawn('jupyter notebook password')
p.expect('Enter password: ')
p.sendline(os.environ['JUPYTER_PASSWORD'])
p.sendline(os.environ['JUPYTER_PASSWORD'])
# Drain remaining output so the child can finish writing its config.
p.read()
| [
"wcrichto@cs.stanford.edu"
] | wcrichto@cs.stanford.edu |
48b3699c37ed955959b9eb823a7520bb10faf1c5 | 7c188319690472dfec23d8fd78c979c1f291c70c | /coincidence_calculation_dag.py | c8ed221edd45c1fbd6278b1a88e1290052a95100 | [] | no_license | bhokansonfasig/pycondor_scripts | 705c79c4ad80582ae20a575192c347823ad6baa4 | c85a3e0497cc824866a4fe7c32016a2d276d8759 | refs/heads/master | 2021-01-09T05:26:57.053873 | 2020-11-18T23:35:52 | 2020-11-18T23:35:52 | 80,770,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,980 | py | #! /usr/bin/env python
#
# coincidence_calculation_dag.py
# Script for submitting many coincidence_calculator scripts to HTCondor in a dagman
#
#
# Ben Hokanson-Fasig
# Created 03/11/18
# Last edit 03/11/18
#
from __future__ import division, print_function
# Standard libraries
import argparse
import os.path
# Custom libraries
from pycondor import Job, Dagman
# Energies used when the caller does not pass --energies explicitly.
default_energies = ["1e8", "1e9", "1e10"]
# Parse command line arguments
parser = argparse.ArgumentParser(description="Script for submitting many "+
                                 "detector simulation scripts to HTCondor "+
                                 "in a dagman.")
parser.add_argument('--iterations', type=int, default=1,
                    help="Number of iterations to run (each script with "+
                    "different options is submitted this many times)")
parser.add_argument('--energies', nargs='+', default=default_energies,
                    help="Energies over which to run simulations "+
                    "(defaults to 3 energies from 1e8 to 1e10)")
parser.add_argument('--maxjobs', type=int, default=0,
                    help="Maximum number of jobs to submit at once "+
                    "(default no limit)""")
parser.add_argument('-v', '--verbose', action="store_true",
                    help="If present, print all debugging messages from "+
                    "pycondor")
# NOTE(review): args.args is None when --args is omitted, yet the code
# below indexes it unconditionally -- confirm --args is always supplied.
parser.add_argument('--args', nargs=argparse.REMAINDER,
                    help="Additional arguments beyond this are passed on "+
                    "to the script""")
args = parser.parse_args()
# Set script and name
script_file = "/home/fasig/scalable_radio_array/coincidence_calculator.sh"
# Job names encode the first pass-through argument, the -n value (default
# 10) and the iteration count: coincidence_calculation_<arg0>_n<N>x<iters>.
descriptive_name = "coincidence_calculation_"+args.args[0]
if "-n" in args.args:
    descriptive_name += "_n"+args.args[args.args.index("-n")+1]
else:
    descriptive_name += "_n10"
descriptive_name += "x"+str(args.iterations)
# Width needed to zero-pad iteration indices consistently.
zfill_amount = len(str(args.iterations-1))
# When -o names an output file, remember its position and location so each
# job's output can be renamed and transferred back (see the loop below).
output_index = -1
if "-o" in args.args:
    output_index = args.args.index("-o") + 1
    output_name = os.path.basename(args.args[output_index])
    output_dirname = os.path.dirname(args.args[output_index])
# Declare the error, output, log, and submit directories for Condor job
error = '/scratch/fasig/pycondor'
output = '/scratch/fasig/pycondor'
log = '/scratch/fasig/pycondor'
submit = '/scratch/fasig/pycondor'
# Set up the PyCondor Dagman
dag = Dagman(descriptive_name, submit=submit, verbose=2 if args.verbose else 0)
# Add arguments to jobs
for energy in args.energies:
    for i in range(args.iterations):
        transfer_files = []
        file_remaps = []
        if output_index!=-1:
            # Substitute the ENERGY/ITERATION placeholders in the requested
            # output filename, and remap the transferred file back into the
            # originally requested output directory.
            replaced_name = output_name.replace("ENERGY", energy)
            replaced_name = replaced_name.replace("ITERATION",
                                                  str(i).zfill(zfill_amount))
            args.args[output_index] = replaced_name
            transfer_files.append(replaced_name)
            file_remaps.append(replaced_name+'='+
                               os.path.join(output_dirname, replaced_name))
        job = Job(descriptive_name+"_"+energy+"_"+str(i).zfill(zfill_amount),
                  executable=script_file, output=output, error=error,
                  log=log, submit=submit, #request_memory="5GB",
                  extra_lines=["should_transfer_files = YES",
                               "transfer_output_files = "+
                               ", ".join(transfer_files),
                               'transfer_output_remaps = "'+
                               '; '.join(file_remaps)+'"',
                               "when_to_transfer_output = ON_EXIT"],
                  verbose=2 if args.verbose else 0)
        # The executable receives the energy first, then the pass-through
        # arguments.
        job.add_arg(" ".join([energy]+args.args))
        dag.add_job(job)
# Write all necessary submit files and submit dagman to Condor
if args.maxjobs>0:
    dag.build_submit(submit_options="-maxjobs "+str(args.maxjobs))
else:
    dag.build_submit()
| [
"bhokansonfasig@gmail.com"
] | bhokansonfasig@gmail.com |
428c92868d4e1a7b4335596b00b796c1b7ddeb20 | 1603f7bdaf8d5e8ba611598ee928e07eb2b2d1c0 | /IT-Lab/assignment-4/codes/3.py | 0ed47e919fe6ff8a42b25f07a843f13275dfb80f | [] | no_license | bawilliams3/college-assignments | 7105aa8f427288eb74f1cc98efddf2010b27f04f | 0ca732c46bdd6e7efe672b91f5df864669ab4766 | refs/heads/master | 2023-02-10T19:39:03.000975 | 2021-01-06T10:01:29 | 2021-01-06T10:06:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | def init(str):
a = str[0].upper()
for i in range(1, len(str)):
if str[i] == " ":
print(a, str[i + 1].upper())
str = "Ayush Dubey"
init(str)
| [
"ayushdubey70@gmail.com"
] | ayushdubey70@gmail.com |
ec2cf3c5de47ed3d15a2e35ce709010a06765dc6 | bc441bb06b8948288f110af63feda4e798f30225 | /metadata_center_sdk/model/metadata_center/stream_metric_states_pb2.pyi | 6474aa7019ab29823cb02ed09ca5ff199e2a6073 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,411 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from metadata_center_sdk.model.metadata_center.stream_metric_schema_pb2 import (
StreamMetricSchema as metadata_center_sdk___model___metadata_center___stream_metric_schema_pb2___StreamMetricSchema,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
# Generated mypy stub for the StreamMetricStates protobuf message: scalar
# fields `org` (int) and `command` (text), plus a repeated `payload` of
# StreamMetricSchema messages.
class StreamMetricStates(google___protobuf___message___Message):
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    org = ... # type: builtin___int
    command = ... # type: typing___Text
    @property
    def payload(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[metadata_center_sdk___model___metadata_center___stream_metric_schema_pb2___StreamMetricSchema]: ...
    def __init__(self,
        *,
        org : typing___Optional[builtin___int] = None,
        command : typing___Optional[typing___Text] = None,
        payload : typing___Optional[typing___Iterable[metadata_center_sdk___model___metadata_center___stream_metric_schema_pb2___StreamMetricSchema]] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> StreamMetricStates: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> StreamMetricStates: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"command",b"command",u"org",b"org",u"payload",b"payload"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
0fbb63bd4a8e6be728510d63e015bbe22888c888 | d5f75adf5603927396bdecf3e4afae292143ddf9 | /python/paddle/fluid/tests/unittests/auto_parallel/test_comm_cost.py | 215385787880c3f6d92f2e63fc47651a0bc5f8f5 | [
"Apache-2.0"
] | permissive | jiweibo/Paddle | 8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4 | 605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74 | refs/heads/develop | 2023-07-21T03:36:05.367977 | 2022-06-24T02:31:11 | 2022-06-24T02:31:11 | 196,316,126 | 3 | 2 | Apache-2.0 | 2023-04-04T02:42:53 | 2019-07-11T03:51:12 | Python | UTF-8 | Python | false | false | 6,973 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import json
import tempfile
import paddle
from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.cost import CommContext
from paddle.distributed.auto_parallel.cost import build_comm_desc
from paddle.distributed.auto_parallel.cost import AllreduceSumOpCost
from paddle.distributed.auto_parallel.cost import AllgatherOpCost
from paddle.distributed.auto_parallel.cost import BroadcastOpCost
from paddle.distributed.auto_parallel.cost import SendOpCost
from paddle.distributed.auto_parallel.cost import RecvOpCost
from paddle.distributed.auto_parallel.cost import IdentityOpCost
from test_cluster import cluster_json, multi_cluster_json
class TestCommOpCost(unittest.TestCase):
    """Checks that each communication-op cost model produces a sane time
    estimate, for both a single-machine and a two-machine cluster."""
    def setUp(self):
        # Scratch directory for the generated cluster-topology JSON files.
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_comm_cost(self):
        # Build cluster
        cluster_json_path = os.path.join(self.temp_dir.name,
                                         "auto_parallel_cluster0.json")
        cluster_json_object = json.loads(cluster_json)
        with open(cluster_json_path, "w") as cluster_json_file:
            json.dump(cluster_json_object, cluster_json_file)
        cluster = Cluster()
        cluster.build_from_file(cluster_json_path)
        # Build CommContext (reset the singleton so this test's cluster is used)
        CommContext._has_instance = None
        CommContext._instance = None
        comm_context = CommContext(cluster)
        # Check AllreduceSumCost 128MB ring cost
        allreduce_sum_op_desc = build_comm_desc("c_allreduce_sum",
                                                [0, 1, 2, 3, 4, 5, 6, 7],
                                                paddle.float32,
                                                [1, 32 * (10**6)])
        allreduce_sum_op_cost = AllreduceSumOpCost(
            op_desc=allreduce_sum_op_desc, comm_context=comm_context)
        # Fix: this cost was computed but never checked, unlike the others.
        self.assertTrue(allreduce_sum_op_cost.time > 0)
        # Check AllgatherOpCost cost
        allgather_op_desc = build_comm_desc("c_allgather",
                                            [0, 1, 2, 3, 4, 5, 6, 7],
                                            paddle.float32, [1, 32 * (10**6)])
        allgather_op_cost = AllgatherOpCost(op_desc=allgather_op_desc,
                                            comm_context=comm_context)
        self.assertTrue(allgather_op_cost.time > 0)
        # Check BroadcastOpCost cost
        broadcast_op_desc = build_comm_desc("c_broadcast",
                                            [0, 1, 2, 3, 4, 5, 6, 7],
                                            paddle.float32, [1, 32 * (10**6)])
        broadcast_op_cost = BroadcastOpCost(op_desc=broadcast_op_desc,
                                            comm_context=comm_context)
        self.assertTrue(broadcast_op_cost.time > 0)
        # Check SendOpCost cost
        send_op_desc = build_comm_desc("send_v2", [0, 1], paddle.float32,
                                       [1, 32 * (10**6)])
        send_op_cost = SendOpCost(op_desc=send_op_desc,
                                  comm_context=comm_context)
        self.assertTrue(send_op_cost.time > 0)
        # Check RecvOpCost cost
        recv_op_desc = build_comm_desc("recv_v2", [0, 1], paddle.float32,
                                       [1, 32 * (10**6)])
        recv_op_cost = RecvOpCost(op_desc=recv_op_desc,
                                  comm_context=comm_context)
        self.assertTrue(recv_op_cost.time > 0)
        # Check IdentityOpCost cost (identity may legitimately cost 0)
        identity_op_desc = build_comm_desc("c_identity", [0, 1], paddle.float32,
                                           [1, 32 * (10**6)])
        identity_op_cost = IdentityOpCost(op_desc=identity_op_desc,
                                          comm_context=comm_context)
        self.assertTrue(identity_op_cost.time >= 0)
    def test_cross_machine_comm_cost(self):
        # Build cluster
        cluster_json_path = os.path.join(self.temp_dir.name,
                                         "auto_parallel_cluster1.json")
        cluster_json_object = json.loads(multi_cluster_json)
        with open(cluster_json_path, "w") as cluster_json_file:
            json.dump(cluster_json_object, cluster_json_file)
        cluster = Cluster()
        cluster.build_from_file(cluster_json_path)
        # Build CommContext (reset the singleton so this test's cluster is used)
        CommContext._has_instance = None
        CommContext._instance = None
        comm_context = CommContext(cluster)
        # Check AllreduceSumCost 128MB ring cost
        allreduce_sum_op_desc = build_comm_desc(
            "c_allreduce_sum",
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
            paddle.float32, [1, 32 * (10**6)])
        allreduce_sum_op_cost = AllreduceSumOpCost(
            op_desc=allreduce_sum_op_desc, comm_context=comm_context)
        # Fix: this cost was computed but never checked, unlike the others.
        self.assertTrue(allreduce_sum_op_cost.time > 0)
        # Check AllgatherOpCost cost
        allgather_op_desc = build_comm_desc(
            "c_allgather",
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
            paddle.float32, [1, 32 * (10**6)])
        allgather_op_cost = AllgatherOpCost(op_desc=allgather_op_desc,
                                            comm_context=comm_context)
        self.assertTrue(allgather_op_cost.time > 0)
        # Check BroadcastOpCost cost
        broadcast_op_desc = build_comm_desc(
            "c_broadcast",
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
            paddle.float32, [1, 32 * (10**6)])
        broadcast_op_cost = BroadcastOpCost(op_desc=broadcast_op_desc,
                                            comm_context=comm_context)
        self.assertTrue(broadcast_op_cost.time > 0)
        # Check SendOpCost cost
        send_op_desc = build_comm_desc("send_v2", [0, 1], paddle.float32,
                                       [1, 32 * (10**6)])
        send_op_cost = SendOpCost(op_desc=send_op_desc,
                                  comm_context=comm_context)
        self.assertTrue(send_op_cost.time > 0)
        # Check RecvOpCost cost
        recv_op_desc = build_comm_desc("recv_v2", [0, 1], paddle.float32,
                                       [1, 32 * (10**6)])
        recv_op_cost = RecvOpCost(op_desc=recv_op_desc,
                                  comm_context=comm_context)
        self.assertTrue(recv_op_cost.time > 0)
# Run the suite when executed directly (e.g. `python test_comm_cost.py`).
if __name__ == "__main__":
    unittest.main()
| [
"noreply@github.com"
] | jiweibo.noreply@github.com |
04dcad9ab574e7c0be75f598f89e4979af93b0ff | a01fb7bb8e8738a3170083d84bc3fcfd40e7e44f | /python3/module/cvx/dcp/attribute.py | 040bb7a46b3bcdb696f58232b0e9598ede73246d | [] | no_license | jk983294/CommonScript | f07acf603611b4691b176aa4a02791ef7d4d9370 | 774bcbbae9c146f37312c771c9e867fb93a0c452 | refs/heads/master | 2023-08-21T17:50:19.036159 | 2023-08-16T00:22:03 | 2023-08-16T00:22:03 | 42,732,160 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | import cvxpy as cp
import numpy
# attribute
X = cp.Variable((5, 4))
A = numpy.ones((3, 5))
expression = A * X
print("dimensions of X:", X.shape) # (5, 4)
print("size of X:", X.size) # 20
print("number of dimensions:", X.ndim) # 2
print("dimensions of sum(X):", cp.sum(X).shape) # ()
print("dimensions of A*X:", expression.shape) # (3, 4)
| [
"jk983294@gmail.com"
] | jk983294@gmail.com |
df3aa83d02013449cd4858e8e57a2e1999212577 | 8fe79b2f661a49a03a0d5abd929fd4faa0f53bea | /laboratory7/Controller.py | 8c07d11994a9bd95b227ffb6a06df312650c623e | [] | no_license | VasilicaMoldovan/AI | 03a58e28953ce6942ca141064412a9bcc4d4728c | 6acaf44c9fbd5f419411d85727bc0d8b480118ab | refs/heads/master | 2021-04-08T02:32:43.858389 | 2020-12-01T16:08:10 | 2020-12-01T16:08:10 | 248,730,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,697 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 15 09:36:30 2020
@author: Vasilica
"""
from math import sqrt, isnan
from Problem import Problem
import numpy as np
class Controller:
def __init__(self, problem):
self.__problem = problem
def getError(self, actual, predicted):
sum_error = 0.0
for i in range(len(actual)):
prediction_error = predicted[i] - actual[i]
sum_error += abs(prediction_error)
mean_error = sum_error / float(len(actual))
return abs(mean_error)
def solveGradientDescent(self, dataset, algorithm, n_folds, *args):
folds = self.__problem.cross_validation_split(n_folds)
#print(folds)
scores = list()
error = 0.0
predicted = list()
for fold in folds:
train_set = list(folds)
train_set.remove(fold)
train_set = sum(train_set, [])
test_set = list()
#print(fold)
for row in fold:
#print(row)
row_copy = list(row)
test_set.append(row_copy)
row_copy[-1] = None
predicted = algorithm(train_set, test_set, *args)
actual = [row[-1] for row in fold]
#print(actual)
rmse = self.getError(actual, predicted)
error = rmse
scores.append(rmse)
return (scores, error)
def predict(self, row, coefficients):
yhat = coefficients[0]
for i in range(len(row) - 1):
yhat += coefficients[i + 1] * row[i]
return yhat
def coefficientsGradientDescent(self, train, l_rate, n_epoch):
coef = []
for i in range(len(train[0])):
coef.append(0.0)
#print(coef)
cnt = 0
for epoch in range(n_epoch):
for row in train:
# print(coef)
yhat = self.predict(row, coef)
#print(yhat)
error = yhat - row[-1]
coef[0] = coef[0] - l_rate * error
for i in range(len(row)-1):
coef[i + 1] = coef[i + 1] - l_rate * error * row[i]
if isnan(coef[0]):
cnt += 1
#print(l_rate, n_epoch, error)
return coef
def regressionGradientDescent(self, train, test, l_rate, n_epoch):
predictions = list()
coef = self.coefficientsGradientDescent(train, l_rate, n_epoch)
#print(coef)
for row in test:
#print(coef)
yhat = self.predict(row, coef)
predictions.append(yhat)
return predictions
| [
"mvie2572@scs.ubbcluj.ro"
] | mvie2572@scs.ubbcluj.ro |
7be19747bc6760b487b84fc6ce67e19b93d4099b | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5646553574277120_0/Python/cpu4500/denom.py | 92b0e5b8342f61cccc3e77f445509aa61a846998 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | import numpy as np
def sum_coins(b, coins):
s = 0
for i in xrange(len(coins)):
if b[i] == '1':
s += coins[i]
return s
def mark_denoms(num, coins, v):
lenc = len(coins)
lim = pow(2, lenc)
for n in xrange(1, lim):
b = bin(n)[2:][::-1]
# pad with zeros
b += "0" * (lenc - len(b))
idx = sum_coins(b, coins) - 1
if idx >= v:
break # numbers will not get smaller
num[idx] = 0
return num
def calc_denoms(coins, c, v):
num = [ 1 ] * v
added = 0
# fill the number up to denoms
num = mark_denoms(num, coins, v)
left = np.count_nonzero(num)
while left > 0:
for i in xrange(v):
if num[i] == 0:
continue
coin = i + 1
coins.append(coin)
coins.sort()
added += 1
num = mark_denoms(num, coins, v)
left = np.count_nonzero(num)
break # recalc left
return added
if __name__ == '__main__':
import sys
import time
start_time = time.time()
data = file(sys.argv[1], "rb").read()
lines = data.split('\n')
out = file(sys.argv[1] + "-sol.dat", "wb")
for i in xrange(int(lines[0])):
c, d, v = lines[2*i+1].strip().split(" ")
c = int(c)
d = int(d)
v = int(v)
coins = [int(x) for x in lines[2*i + 2].strip().split(" ")]
if len(coins) != d:
raise Exception("invalid input")
coins.sort()
out.write("Case #%d: %d\n" % (i + 1, calc_denoms(coins, c, v)))
out.close()
print "--- %s seconds ---" % (time.time() - start_time)
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
ab4a09f3c25175e4ee9f123e8f04d8198432f0a4 | 9cc76b1b1dd0064ab6613cbca6ce93bc179db355 | /ros_ws/build/learning_ros_dependencies/moveit_msgs/catkin_generated/pkg.installspace.context.pc.py | fe118ef47311dfe0c0ea481c13ace3f1d432a392 | [] | no_license | ABCaps35/learning_ros_ready_ws | 1131c32b2ecadffa8dd186c9ebcfdba7284f30ad | 1aa9c512d5006584e8bc84101a715e16a222a47d | refs/heads/main | 2023-04-03T20:32:58.671255 | 2021-04-13T23:41:13 | 2021-04-13T23:41:13 | 357,715,306 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs;actionlib_msgs;sensor_msgs;geometry_msgs;trajectory_msgs;shape_msgs;object_recognition_msgs;octomap_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "moveit_msgs"
PROJECT_SPACE_DIR = "/home/abcaps35/ros_ws_nogit/install"
PROJECT_VERSION = "0.11.2"
| [
"acapelli345@gmail.com"
] | acapelli345@gmail.com |
42a452aa8e550ba8b9b5dd0c1a73a54ec48e22b1 | 768058e7f347231e06a28879922690c0b6870ed4 | /venv/lib/python3.7/site-packages/numba/core/registry.py | 4b3626d29d284a53c046c5f95484133f038f026a | [] | no_license | jciech/HeisenbergSpinChains | 58b4238281d8c158b11c6c22dd0da82025fd7284 | e43942bbd09f6675e7e2ff277f8930dc0518d08e | refs/heads/master | 2022-12-18T08:04:08.052966 | 2020-09-29T12:55:00 | 2020-09-29T12:55:00 | 258,476,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,915 | py | import contextlib
from numba.core.descriptors import TargetDescriptor
from numba.core import utils, typing, dispatcher, cpu
# -----------------------------------------------------------------------------
# Default CPU target descriptors
class _NestedContext(object):
_typing_context = None
_target_context = None
@contextlib.contextmanager
def nested(self, typing_context, target_context):
old_nested = self._typing_context, self._target_context
try:
self._typing_context = typing_context
self._target_context = target_context
yield
finally:
self._typing_context, self._target_context = old_nested
class CPUTarget(TargetDescriptor):
options = cpu.CPUTargetOptions
_nested = _NestedContext()
@utils.cached_property
def _toplevel_target_context(self):
# Lazily-initialized top-level target context, for all threads
return cpu.CPUContext(self.typing_context)
@utils.cached_property
def _toplevel_typing_context(self):
# Lazily-initialized top-level typing context, for all threads
return typing.Context()
@property
def target_context(self):
"""
The target context for CPU targets.
"""
nested = self._nested._target_context
if nested is not None:
return nested
else:
return self._toplevel_target_context
@property
def typing_context(self):
"""
The typing context for CPU targets.
"""
nested = self._nested._typing_context
if nested is not None:
return nested
else:
return self._toplevel_typing_context
def nested_context(self, typing_context, target_context):
"""
A context manager temporarily replacing the contexts with the
given ones, for the current thread of execution.
"""
return self._nested.nested(typing_context, target_context)
# The global CPU target
cpu_target = CPUTarget()
class CPUDispatcher(dispatcher.Dispatcher):
targetdescr = cpu_target
class TargetRegistry(utils.UniqueDict):
"""
A registry of API implementations for various backends.
Attributes
----------
ondemand:
A dictionary of target-name -> function, where function is executed
the first time a target is used. It is used for deferred
initialization for some targets (e.g. gpu).
"""
def __init__(self, *args, **kws):
super(TargetRegistry, self).__init__(*args, **kws)
self.ondemand = utils.UniqueDict()
def __getitem__(self, item):
if item in self.ondemand:
self[item] = self.ondemand[item]()
del self.ondemand[item]
return super(TargetRegistry, self).__getitem__(item)
dispatcher_registry = TargetRegistry()
dispatcher_registry["cpu"] = CPUDispatcher
| [
"jan@multiply.ai"
] | jan@multiply.ai |
da2351dd70ab58ebc18d80a0f77cde681fe82d54 | dba522d0d9f1677672af03c81a0118565158c659 | /Net/LineConnect.py | fdc0cfb6e99f30cf3fa115c24a8c254960ede875 | [] | no_license | fossabot/LineAlpha-Full-Ver | c6fefbf0d1d69b744c2913e0e1fd51ade5f931d5 | cabe9ab158d358ddb92195855ff07c7d483c6c20 | refs/heads/master | 2022-12-18T07:44:02.743358 | 2020-09-14T12:31:57 | 2020-09-14T12:31:57 | 295,410,125 | 0 | 0 | null | 2020-09-14T12:31:45 | 2020-09-14T12:31:44 | null | UTF-8 | Python | false | false | 4,229 | py | # -*- coding: utf-8 -*-
import json
import rsa
from ..Gen import TalkService
from ..Gen.ttypes import *
from .LineServer import url
from thrift.transport import THttpClient
from thrift.protocol import TCompactProtocol
from .LineTransport import LineTransport
from ..Api.LineCallback import LineCallback
class LineConnect(object):
_thriftTransport = None
_thriftProtocol = None
onLogin = False
authToken = ""
certificate = ""
def __init__(self):
self._transportOpen(url.HOST)
self.callback = LineCallback(self.defaultCall)
def _transportOpen(self, host, path=None):
if path is not None:
self._thriftTransport = LineTransport(host + path)
else:
self._thriftTransport = LineTransport(host)
self._thriftProtocol = TCompactProtocol.TCompactProtocol(
self._thriftTransport)
self._client = TalkService.Client(self._thriftProtocol)
def _login(self, email, passwordd, certificate=None, loginName='kaopy'):
self._thriftTransport.targetPath(url.REGISTRATION)
session_json = url.get_json(url.parseUrl(url.SESSION_KEY))
self.certificate = certificate
session_key = session_json['session_key']
message = (chr(len(session_key)) + session_key +
chr(len(email)) + email +
chr(len(passwordd)) + passwordd).encode('utf-8')
keyname, n, e = session_json['rsa_key'].split(",")
pub_key = rsa.PublicKey(int(n, 16), int(e, 16))
crypto = rsa.encrypt(message, pub_key).encode('hex')
self._thriftTransport.targetPath(url.REGISTRATION)
result = self._client.loginWithIdentityCredentialForCertificate(
IdentityProvider.LINE, keyname, crypto, True, '127.0.0.1', loginName, certificate)
if result.type == 3:
# required pin verification
url._pincode = result.pinCode
self.callback.Pinverified(url._pincode)
url.set_Headers('X-Line-Access', result.verifier)
getAccessKey = url.get_json(
url.parseUrl(url.CERTIFICATE), allowHeader=True)
self.verifier = getAccessKey['result']['verifier']
result = self._client.loginWithVerifierForCerificate(self.verifier)
self.certificate = result.certificate
self.authToken = result.authToken
self._thriftTransport.setAccesskey(self.authToken)
self.onLogin = True
self._thriftTransport.targetPath(url.NORMAL)
elif result.type == 2:
pass
elif result.type == 1:
self.authToken = result.authToken
self._thriftTransport.setAccesskey(self.authToken)
self.onLogin = True
self._thriftTransport.targetPath(url.NORMAL)
def _tokenLogin(self, authToken):
self._thriftTransport.targetPath(url.REGISTRATION)
self._thriftTransport.setAccesskey(authToken)
self.authToken = authToken
self.onLogin = True
self._thriftTransport.targetPath(url.NORMAL)
def _qrLogin(self, keepLoggedIn=True, systemName="kaopy"):
self._thriftTransport.targetPath(url.REGISTRATION)
qr = self._client.getAuthQrcode(keepLoggedIn, systemName)
self.callback.QrUrl("line://au/q/" + qr.verifier)
url.set_Headers('X-Line-Application', url.LINE_APPLICATION)
url.set_Headers('X-Line-Access', qr.verifier)
verified = url.get_json(
url.parseUrl(url.CERTIFICATE), allowHeader=True)
vr = verified['result']['verifier']
lr = self._client.loginWithVerifierForCertificate(vr)
self._thriftTransport.setAccesskey(lr.authToken)
self.authToken = lr.authToken
self.onLogin = True
self._thriftTransport.targetPath(url.NORMAL)
def setCallback(self, callback):
self.callback = LineCallback(callback)
def defaultCall(self, str):
print str
def _logout(self):
self._client.logoutSession(self.authToken)
self._thriftTransport.setAccesskey("") | [
"noreply@github.com"
] | fossabot.noreply@github.com |
aa34e6c6aa8371efa8bb6814efb0a76d2c5b9b27 | a86293a2033c06410aa8ed19bcbce8ca55ea3c55 | /src/client_libraries/python/microsoft/dynamics/customerinsights/api/models/attribute_search_configuration_py3.py | 8582fe1585becb208318262b3fd766a56ec232ce | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ramotheonly/Dynamics365-CustomerInsights-Client-Libraries | a3ca28aa78d2b5509e65d9895ff4a0d42d05f611 | e00632f7972717b03e0fb1a9e2667e8f9444a0fe | refs/heads/main | 2023-08-02T08:09:04.063030 | 2021-09-28T22:42:15 | 2021-09-28T22:42:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AttributeSearchConfiguration(Model):
"""AttributeSearchConfiguration.
:param name: Gets the attribute name.
:type name: str
:param properties:
:type properties:
~microsoft.dynamics.customerinsights.api.models.AttributeSearchProperties
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'AttributeSearchProperties'},
}
def __init__(self, *, name: str=None, properties=None, **kwargs) -> None:
super(AttributeSearchConfiguration, self).__init__(**kwargs)
self.name = name
self.properties = properties
| [
"michaelajohnston@mac.com"
] | michaelajohnston@mac.com |
d9d364c470cfcfcc8030af94aa94f4c8382d4e56 | 48a522b031d45193985ba71e313e8560d9b191f1 | /baekjoon/python/8870.py | 7369728bd27be2099a06b95321854fe065090f66 | [] | no_license | dydwnsekd/coding_test | beabda0d0aeec3256e513e9e0d23b43debff7fb3 | 4b2b4878408558239bae7146bb4f37888cd5b556 | refs/heads/master | 2023-09-04T12:37:03.540461 | 2023-09-03T15:58:33 | 2023-09-03T15:58:33 | 162,253,096 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | #TODO
import sys
n = int(sys.stdin.readline())
ret = 0
for s in range(1, n+1):
for k in range(s, n+1):
for i in range(k, n+1):
ret = (ret+s*k//i) % 2010
print(ret)
| [
"dydwnsekd123@gmail.com"
] | dydwnsekd123@gmail.com |
d3d7c547ede2482c2ad59dc6e66d5e9a72b12c2e | 20eee94bbdab84536d6308c6c1e46dd1d85ce1a5 | /variable_scope_test.py | 3a1d90cf3049cef36a29069078817192aeca23b2 | [] | no_license | hccho2/hccho2FirstGitProject | d73fbfd4332c3d81f449e7506695435a0e739e80 | fc454f210de6dbd93d047f3c263089a90690715c | refs/heads/master | 2023-08-30T03:05:54.533544 | 2023-08-28T01:35:04 | 2023-08-28T01:35:04 | 87,028,484 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | # -*- coding: utf-8 -*-
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import tensorflow as tf
tf.reset_default_graph()
def G(name,input):
with tf.variable_scope(name,reuse=tf.AUTO_REUSE):
out = tf.layers.dense(input,units=10)
return out
x1 = tf.placeholder(tf.float32,[None,100])
x2 = tf.placeholder(tf.float32,[None,100])
y = G('a',x1)
z = G('a',x2)
w = G('b',x1) | [
"noreply@github.com"
] | hccho2.noreply@github.com |
dab240af6adc8f79725bd46461a80c07b8f8214a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02850/s384836888.py | f3481ff2085aa579f1d2e4d638166132921e37e8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | import sys
input = sys.stdin.readline
sys.setrecursionlimit(2147483647)
class Edge:
def __init__(self, to, id):
self.to = to
self.id = id
N = int(input())
graph = {}
ans = [0] * (N-1)
def dfs(v, c=-1, p=-1):
global graph, ans
k = 1
for edge in graph[v]:
nv = edge.to
if nv == p:continue
if k == c:k += 1
ans[edge.id] = k
dfs(nv, k, v)
k += 1
def main():
global N, graph, ans
for i in range(N):
graph[i] = set()
for i in range(N-1):
a, b = map(int, input().split())
graph[a-1].add(Edge(b-1, i))
graph[b-1].add(Edge(a-1, i))
color_count = 0
for i in range(N):
color_count = max(color_count, len(graph[i]))
dfs(0, 0, -1)
print(color_count)
for x in ans:
print(x)
if __name__ == "__main__":
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c66c3d810bab917c1f7f9dc7040a59b37b36317c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03687/s591338614.py | db864d6db120976b763aeba88c36051e35621fe4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | def myAnswer(s:list) -> int:
setS = set(s)
if(len(setS) == 1): return 0
ans = 10**9
while len(setS) != 0:
target = setS.pop()
counter = 0
tmp = s[:]
while True:
pre = tmp.pop(0)
N = len(tmp)
for i in range(N):
now = tmp.pop(0)
if(pre == target or now==target):
tmp.append(target)
else:
tmp.append(now)
pre = now
counter += 1
if(len(set(tmp))==1):
break
ans = min(ans,counter)
return ans
def modelAnswer():
return
def main():
s = list(input())
print(myAnswer(s[:]))
if __name__ == '__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1f50dd9939181f3d765b88857dec6ee074c8bf54 | eef39fd96ef4ed289c1567f56fde936d5bc42ea4 | /BaekJoon/Bronze2/1100.py | 7ca5051440d1f755fd196ebe82f436d249269c7a | [] | no_license | dudwns9331/PythonStudy | 3e17da9417507da6a17744c72835c7c2febd4d2e | b99b9ef2453af405daadc6fbf585bb880d7652e1 | refs/heads/master | 2023-06-15T12:19:56.019844 | 2021-07-15T08:46:10 | 2021-07-15T08:46:10 | 324,196,430 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | # 하얀 칸
"""
2021-01-17 오후 7:09
안영준
문제
체스판은 8*8크기이고, 검정 칸과 하얀 칸이 번갈아가면서 색칠되어 있다.
가장 왼쪽 위칸 (0,0)은 하얀색이다. 체스판의 상태가 주어졌을 때, 하얀 칸 위에 말이 몇 개 있는지 출력하는 프로그램을 작성하시오.
입력
첫째 줄부터 8개의 줄에 체스판의 상태가 주어진다. ‘.’은 빈 칸이고, ‘F’는 위에 말이 있는 칸이다.
출력
첫째 줄에 문제의 정답을 출력한다.
"""
count = 0
for i in range(1, 9):
line = input()
if i % 2 == 0:
for j in range(len(line)):
if j % 2 != 0:
if line[j] == 'F':
count += 1
else:
for j in range(len(line)):
if j % 2 == 0:
if line[j] == 'F':
count += 1
print(count)
| [
"dudwns1045@naver.com"
] | dudwns1045@naver.com |
dfe1288981d2da36ed164fecd92930e9d85e09d7 | a74b980fd95d5d810315f181449fc9d1710e6923 | /savecode/pythonpackages/tests/test_sqlite.py | c6f5a6df2c40fe3e1cbb083c61322c53b282bbf5 | [
"Apache-2.0"
] | permissive | cbbbbbbbb/sspywork | b70f5539203b47b21eec2f0514ddca155affc2b8 | 8f05a6b91fc205960edd57f9076facec04f49a1a | refs/heads/master | 2023-03-22T19:45:13.024076 | 2021-03-08T01:24:21 | 2021-03-08T01:24:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,860 | py | """sqlite test"""
# -*- coding:utf-8 -*-
import threading
import time
import traceback
from commonbaby.mslog import MsFileLogConfig, MsLogLevels, MsLogManager
MsLogManager.static_initial(
dft_lvl=MsLogLevels.INFO, msficfg=MsFileLogConfig(fi_dir=r'./_serverlog'))
logger = MsLogManager.get_logger("idownserver")
from commonbaby.sql import (SqlConn, SqliteColumn, SqliteConn,
SqliteConnManager, SqliteCursor, SqliteIndex,
SqliteTable, table_locker)
from commonbaby.helpers import helper_time
__locker = threading.RLock()
__locker2 = threading.RLock()
tables: dict = {
"TableA":
SqliteTable(
"TableA",
True,
SqliteColumn("Col1", 'INTEGER', None, False, True, True,
True).set_index_new("Idx1"),
SqliteColumn("Col2", nullable=False, defaultval='DFT'),
SqliteColumn("Col3", 'INTEGER', defaultval=1),
),
}
# tables: dict = {
# "ClientStatus":
# SqliteTable(
# "ClientStatus",
# True,
# SqliteColumn(
# colname="Id",
# coltype='INTEGER',
# nullable=False,
# is_primary_key=True,
# is_auto_increament=True,
# is_unique=True).set_index_new(),
# SqliteColumn(colname="ClientId", nullable=False).set_index_new(),
# SqliteColumn(colname="SystemVer"),
# SqliteColumn(colname="IP"),
# SqliteColumn(colname="Mac"),
# SqliteColumn(colname="CrossWall", coltype='INTEGER'),
# SqliteColumn(colname="Country"),
# SqliteColumn(colname="Platform"),
# SqliteColumn(colname="AppType", coltype='INTEGER'),
# SqliteColumn(colname="TaskType", coltype='INTEGER'),
# SqliteColumn(colname="AppClassify", coltype='INTEGER'),
# SqliteColumn(colname="CpuSize", coltype='REAL'),
# SqliteColumn(colname="CpuPerc", coltype='REAL'),
# SqliteColumn(colname="MemSize", coltype='REAL'),
# SqliteColumn(colname="MemPerc", coltype='REAL'),
# SqliteColumn(colname="BandWidthd", coltype='REAL'),
# SqliteColumn(colname="BandWidthdPerc", coltype='REAL'),
# SqliteColumn(colname="DiskSize", coltype='REAL'),
# SqliteColumn(colname="DiskPerc", coltype='REAL'),
# SqliteColumn(colname="TaskNewCnt", coltype='INTEGER'),
# SqliteColumn(colname="TaskWaitingCnt", coltype='INTEGER'),
# SqliteColumn(colname="TaskDownloadingCnt", coltype='INTEGER'),
# SqliteColumn(colname="UpdateTime", coltype='REAL',
# nullable=False).set_index_new(),
# ),
# "IDownTask":
# SqliteTable(
# "IDownTask",
# True,
# SqliteColumn(
# colname="Id",
# coltype='INTEGER',
# nullable=False,
# is_primary_key=True,
# is_auto_increament=True,
# is_unique=True).set_index_new(),
# SqliteColumn(colname="ClientId", nullable=False).set_index_new(),
# SqliteColumn(colname="Platform", nullable=False),
# SqliteColumn(colname="TaskId", nullable=False).set_index_new(),
# SqliteColumn(colname="ParentTaskId").set_index_new(),
# SqliteColumn(colname="Status").set_index_new(),
# SqliteColumn(colname="BatchTotalCount"),
# SqliteColumn(colname="BatchCompleteCount").set_index_new(),
# SqliteColumn(colname="TaskType", coltype='INTEGER', nullable=False),
# SqliteColumn(colname="TokenType", coltype='INTEGER').set_index_new(),
# SqliteColumn(colname="AppType", coltype='INTEGER').set_index_new(),
# SqliteColumn(colname="Input"),
# SqliteColumn(colname="PreGlobalTelCode"),
# SqliteColumn(colname="PreAccount"),
# SqliteColumn(colname="GlobalTelCode"),
# SqliteColumn(colname="Phone"),
# SqliteColumn(colname="Account"),
# SqliteColumn(colname="Password"),
# SqliteColumn(colname="Url"),
# SqliteColumn(colname="Host"),
# SqliteColumn(colname="Cookie"),
# SqliteColumn(colname="CmdRcvMsg"),
# SqliteColumn(colname="Result"),
# SqliteColumn(
# colname="CreateTime",
# coltype='DATETIME',
# defaultval="datetime('1970-01-01 00:00:00')"),
# SqliteColumn(colname="Sequence", coltype='INTEGER',
# defaultval=0).set_index_new(),
# SqliteColumn(colname="OtherFields").set_index_new(),
# SqliteColumn(colname="UpdateTime", coltype='REAL',
# nullable=False).set_index_new(),
# )
# }
_dbmngrs: dict = {}
class TestDb:
def __init__(self):
self.mngr: SqliteConnManager = SqliteConnManager(
dbdir=r'./_database',
dbname='aaa.db',
maxdbfisize=1024 * 1024,
)
for tb in tables.values():
self.mngr.append_table(tb)
# print("ok")
@table_locker("TableA")
def write1(num):
db = TestDb()
_dbmngrs[num] = db.mngr
# while True:
# print(f"t{num} waiting")
# time.sleep(1)
while True:
tt1 = time.time()
flag = 0
flag1 = 0
sqlsearch = """select count() from TableA"""
while flag1 < 1000:
# t1 = time.time()
flag = 0
conn: SqlConn = db.mngr.connect_write()
try:
logger.info("t{} got write conn".format(num))
# for i in range(1):
# time.sleep(1)
# logger.info('t{} sleep 1'.format(num))
with __locker:
while flag < 10000:
t = time.time()
sql = """insert into TableA(Col2,Col3) values(?,?)"""
# res = db.execute_modify(sql)
res = conn.execute(sql, (
str(flag),
flag,
))
# logger.info("{} {}".format(flag, res))
flag += 1
conn.commit()
finally:
conn.close()
with __locker2:
count = 0
dbcnt = 0
# for sa in db.mngr.execute_search_all(sqlsearch, True):
# count += sa[0][0]
# dbcnt += 1
for con in db.mngr.connect_all():
try:
con: SqliteConn = con
cursor = con.cursor
cursor.execute(sqlsearch)
result = cursor.fetchall()
count += result[0][0]
dbcnt += 1
finally:
con.close()
logger.info(
"t{} got allconn, data count={} , dbcount={}".format(
num, count, dbcnt))
# t2 = time.time()
# logger.info("{} - {} = {}".format(t2, t1, t2 - t1))
flag1 += 1
tt2 = time.time()
logger.info("{} : {} - {} = {}".format(num, tt2, tt1, tt2 - tt1))
logger.info("{} ok".format(num))
def test():
for i in range(1):
t1 = threading.Thread(target=write1, args=(i, ))
t1.start()
# t2.start()
while True:
time.sleep(1)
if __name__ == "__main__":
try:
dbdir = r"F:\WorkSpace\Projects_Others\IMEIDB\imeidb\output"
dbfi = 'tacdb.sqlite3'
_db = SqliteConnManager(dbdir, dbfi)
test()
while True:
time.sleep(1)
except Exception:
try:
logger.critical("Program error: %s" % traceback.format_exc())
except Exception:
print("Program error: %s" % traceback.format_exc())
finally:
time.sleep(5)
| [
"shiyuegege@qq.com"
] | shiyuegege@qq.com |
f7960ee36f4641f463cb61eb2b964f11c33b2068 | 4cbc8b81d197bc392d1b57856254300331b9738f | /python/teste_print.py | bc79e58ac5c74758c228d710a02fdf2665a3331e | [
"MIT"
] | permissive | vcatafesta/chili | 87b9606f17cda645ba44cbf2bb4cc4637e18d211 | 5c734ac88454db76eb2f4e92c13364a5bbc7a93a | refs/heads/main | 2023-09-01T01:39:09.457448 | 2023-08-29T21:23:28 | 2023-08-29T21:23:28 | 171,972,556 | 2 | 2 | null | 2019-02-22T01:38:49 | 2019-02-22T01:26:46 | null | UTF-8 | Python | false | false | 696 | py | scores = [("Rodney Dangerfield", -1), ("Marlon Brando", 1), ("You", 100)]
for (name, score) in scores:
print "Hello %s. Your score is % d" % (name, score)
# or some might find this even more readable
for (name, score) in scores:
print "Hello %(nm)s. Your score is %(sc) d" % {"nm": name, "sc": score}
x = 3.75
print x
print "You have $%0.2f in your pocket" % (x)
print "You have $%f in your pocket" % (x)
print "You have $%10.1f in your pocket" % (x)
print "You have $%0.0f in your pocket" % (x)
print "You have $%d in your pocket" % (x)
print "You have $%02d in your pocket" % (x)
print "You have $%0.2f. If you spend $1.25, you will have $%0.2f left" % (x, x - 1.25)
| [
"vcatafesta@gmail.com"
] | vcatafesta@gmail.com |
780010a1b1b2f7876d6f3676f420597215ea2021 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/easy/9_5.py | bf3e8b5e93166e4b412093de3b2aa32a158d759a | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,890 | py | Load testing using LOCUST
Locust is an open source load testing tool. Load testing is a type of software
testing that is conducted to check the tolerance/behavior of the system under
a specific expected load. The target of locust is load-testing web sites and
checking number of concurrent users a system can handle.
During a locust test, a swarm of locusts will attack the target i.e website.
The behavior of each locust is configurable and the swarming process is
monitored from a web UI in real-time.
**Speciality of locust:**
* Test scenarios can be written in Python
* Distributed and scalable
* Web-based UI
* Any system can be tested using this tool
**Installation:**
Locust can be installed with pip.
pip install locust
Once the locust is successfully installed, a locust command should be
available in your shell.
To see more available options:
locust --help
**Getting started:**
__
__
__
__
__
__
__
from locust import HttpLocust, TaskSet, task
from locust import ResponseError
import json
class UserBehavior(TaskSet):
def __init__(self, parent):
super(UserBehavior, self).__init__(parent)
self.token = ""
self.headers = {}
def on_start(self):
# The on_start method is called
# when a simulated user starts
# executing that TaskSet class
self.token = self.login()
self.headers = {'Authorization': 'Bearer
{}'.format(self.token)}
self.login()
def login(self):
# admin login and retrieving it's access token
response = self.client.post("/login/",
data = {'username': 'admin',
'password': 'ZYT5nsg3565!'})
return json.loads(response._content)['access']
class WebsiteUser(HttpLocust):
# The task_set attribute should point
# to a TaskSet class which defines
# the behaviour of the user
task_set = UserBehavior
min_wait = 5000
max_wait = 9000
---
__
__
**Start locust:**
To run the above code, create a Python file named locustfile.py, and open
the terminal in the directory of the above created file. Then write the
following command in the terminal.
locust
**Note:** By default locust searches for locustfile.py.
After the successful execution of the above command, you should open a browser
and hit **http://127.0.0.1:8089**
The Locust UI will appear like below:

Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
| [
"qmnguyenw@gmail.com"
] | qmnguyenw@gmail.com |
1652edd4c3747efb4783d476d5fbd2be9dddd7cc | 8c8f08a1d0625c376fe6c8eed0b7d94585de9a15 | /src/webmessage/migrations/0001_initial.py | d5ce52348f722d65fc5969d7c9019d835ceabe9d | [] | no_license | NobodyLikesZergs/mailru_track_web_2 | c804108f11f71fccafbaf78ca38cd211fbf16845 | 27c5347876fed8930ab67cf0ca9ba65143b2b21e | refs/heads/master | 2021-01-11T01:14:59.948168 | 2016-10-13T20:50:08 | 2016-10-13T20:50:08 | 70,682,961 | 0 | 0 | null | 2016-10-12T09:10:31 | 2016-10-12T09:10:31 | null | UTF-8 | Python | false | false | 1,037 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-04 09:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('webchat', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_date', models.DateField(auto_now_add=True)),
('chat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='webchat.Chat')),
],
options={
'ordering': ('-create_date',),
'verbose_name': '\u0421\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u0435',
'verbose_name_plural': '\u0421\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u044f',
},
),
]
| [
"makaleks@live.ru"
] | makaleks@live.ru |
5d37ddbdf8a634044a7ef1da092d0649172029ab | b06e21f2731fd1000fbe5694312aa9cebf543809 | /ElectronPhononCoupling/tests/test_LiF_g2.py | 30f27a1a15dde2e73410e84e9de3292cef0df372 | [] | no_license | Maruf001/ElectronPhononCoupling | 932be124a58b1d49ebbdb5655ea7e30fcf9b6e20 | 434be79b20397bcc8ab4789f7fbde9de55deca77 | refs/heads/master | 2023-03-17T09:42:27.631568 | 2021-01-06T21:37:06 | 2021-01-06T21:37:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,912 | py | from os.path import join as pjoin
from copy import copy
from . import EPCTest, SETest
from ..data import LiF_g2 as test
# FIXME
class Test_LiF_g2(SETest):
common = dict(
temperature = False,
renormalization = False,
broadening = False,
self_energy = False,
spectral_function = False,
dynamical = True,
split_active = True,
double_grid = False,
write = True,
verbose = False,
nqpt=test.nqpt,
wtq=test.wtq,
smearing_eV=0.01,
temp_range=[0,300,300],
omega_range=[-.1,.1,.001],
rootname = 'epc.out',
**test.fnames)
@property
def refdir(self):
return test.refdir
def test_zpr_dyn(self):
"""Dynamical ZPR"""
self.run_compare_nc(
function = self.get_zpr_dyn,
key = 'zero_point_renormalization',
)
def test_tdr_dyn(self):
"""Dynamical Tdep Ren"""
self.run_compare_nc(
function = self.get_tdr_dyn,
key = 'temperature_dependent_renormalization',
)
def test_zp_se(self):
"""Zero Point Self-Energy"""
self.run_compare_nc(
function = self.get_zp_se,
key = 'self_energy',
)
def test_zp_sf(self):
"""Zero Point Spectral Function"""
self.run_compare_nc(
function = self.get_zp_sf,
key = 'spectral_function',
)
def test_td_se(self):
"""Temperature Dependent Self-Energy"""
self.run_compare_nc(
function = self.get_td_se,
key = 'self_energy_temperature_dependent',
)
def test_td_sf(self):
"""Temperature Dependent Spectral Function"""
self.run_compare_nc(
function = self.get_td_sf,
key = 'spectral_function_temperature_dependent',
)
def test_zpr_stat(self):
"""Static ZP Ren"""
self.run_compare_nc(
function = self.get_zpr_stat,
key = 'zero_point_renormalization',
)
def test_tdr_stat(self):
"""Static Tdep Ren"""
self.run_compare_nc(
function = self.get_tdr_stat,
key = 'temperature_dependent_renormalization',
)
def test_zpr_stat_nosplit(self):
"""Static Zero Point Renormalization"""
self.run_compare_nc(
function = self.get_zpr_stat_nosplit,
key = 'zero_point_renormalization',
)
def test_tdr_static_nosplit(self):
"""Static Temperature Dependent Renormalization"""
self.run_compare_nc(
function = self.get_tdr_stat_nosplit,
key = 'temperature_dependent_renormalization',
)
def test_zpb_stat_nosplit(self):
"""Static Zero Point Broadening"""
self.run_compare_nc(
function = self.get_zpb_stat_nosplit,
key = 'zero_point_broadening',
)
def test_tdb_stat_nosplit(self):
"""Static Temperature Dependent Broadening"""
self.run_compare_nc(
function = self.get_tdb_stat_nosplit,
key = 'temperature_dependent_broadening',
)
# All
def generate(self):
"""Generate epc data for all tests."""
print('Generating reference data for tests in directory: {}'.format(
self.refdir))
for function in (
self.get_zpr_dyn,
self.get_tdr_dyn,
self.get_zp_se,
self.get_zp_sf,
self.get_td_se,
self.get_td_sf,
self.get_zpr_stat,
self.get_tdr_stat,
self.get_zpr_stat_nosplit,
self.get_tdr_stat_nosplit,
self.get_zpb_stat_nosplit,
self.get_tdb_stat_nosplit,
):
self.generate_ref(function)
| [
"gabriel.antonius@gmail.com"
] | gabriel.antonius@gmail.com |
868be2877a85a8b54a78bfe5480066d2e84303ba | 72be6500051e0dcf8b24a8586609454a94c73b8d | /apps/niamoto_data/migrations/0019_occurrenceobservations_rainfall.py | 9938b9b7610b9d70580e1a553e8f0aa4ed604b7e | [] | no_license | CheckFly/niamoto-portal | e3e34c26ad6b4f45e505ec3d5c48f3052f08e16d | 5e15a1cb13caefd9fa4f4bb4d74bd0b7d705aa58 | refs/heads/master | 2021-07-04T06:11:41.925705 | 2019-06-02T22:00:49 | 2019-06-02T22:00:49 | 200,769,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-19 06:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('niamoto_data', '0018_auto_20161215_1716'),
]
operations = [
migrations.AddField(
model_name='occurrenceobservations',
name='rainfall',
field=models.FloatField(blank=True, null=True),
),
]
| [
"dimitri.justeau@gmail.com"
] | dimitri.justeau@gmail.com |
a9e54e01313436d06d1e8dbb4f21d52fd9a9231d | 105212e4d2d2175d5105e05552e29b300375e039 | /TensorFlow_tutorials/TensorFlow_simulation_demos/Mandelbrot_demo.py | e1261e0eae4ace11c428700fc08e15a6bf4f5ba9 | [] | no_license | Asher-1/AI | 84f0c42651c0b07e6b7e41ebb354258db64dd0d1 | a70f63ebab3163f299f7f9d860a98695c0a3f7d5 | refs/heads/master | 2022-11-26T07:24:37.910301 | 2019-05-30T13:04:31 | 2019-05-30T13:04:31 | 160,031,310 | 7 | 1 | null | 2022-11-21T22:02:53 | 2018-12-02T09:19:03 | Jupyter Notebook | UTF-8 | Python | false | false | 1,563 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: Asher
@time:2018/3/25 14:08
"""
# 导入仿真库
import tensorflow as tf
import numpy as np
# 导入可视化库
# import PIL.Image
# from io import BytesIO
# from IPython.display import Image, display
import matplotlib.pyplot as plt
def DisplayFractal(a, fmt='jpeg'):
"""显示迭代计算出的彩色分形图像。"""
a_cyclic = (6.28 * a / 20.0).reshape(list(a.shape) + [1])
img = np.concatenate([10 + 20 * np.cos(a_cyclic),
30 + 50 * np.sin(a_cyclic),
155 - 80 * np.cos(a_cyclic)], 2)
img[a == a.max()] = 0
a = img
a = np.uint8(np.clip(a, 0, 255))
# f = BytesIO()
# PIL.Image.fromarray(a).save(f, fmt)
plt.imshow(a)
plt.show()
sess = tf.InteractiveSession()
# 使用NumPy创建一个在[-2,2]x[-2,2]范围内的2维复数数组
Y, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]
Z = X + 1j * Y
xs = tf.constant(Z.astype("complex64"))
zs = tf.Variable(xs)
ns = tf.Variable(tf.zeros_like(xs, "float32"))
tf.initialize_all_variables().run()
# 计算一个新值z: z^2 + x
zs_ = zs * zs + xs
# 这个新值会发散吗?
not_diverged = tf.abs(zs_) < 4
# 更新zs并且迭代计算。
#
# 说明:在这些值发散之后,我们仍然在计算zs,这个计算消耗特别大!
# 如果稍微简单点,这里有更好的方法来处理。
#
step = tf.group(
zs.assign(zs_),
ns.assign_add(tf.cast(not_diverged, "float32"))
)
for i in range(1000):
step.run()
DisplayFractal(ns.eval())
| [
"ludahai19@163.com"
] | ludahai19@163.com |
555abece4d71e799a18ff8b00af46dd6de89ea16 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/GRR/Scripts/GrrGetHunt/GrrGetHunt.py | b8947a34a523ef96972d0e9fcc58340fcd2ebbd0 | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 164 | py | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
demisto.results(demisto.executeCommand("grr_get_hunt", demisto.args()))
| [
"noreply@github.com"
] | demisto.noreply@github.com |
b4fb9f5b74cea3550f2ef48cae41c7726d36f8bf | 6b3ae09a44b187cf8bb0909b33e422a0ddea2a9e | /tests/test_io_text_orthography.py | 99cabec543c81d808e0200458294346b73175cab | [
"MIT"
] | permissive | esteng/PolyglotDB | 963bb48f7d38fb26e9e71dcc60927b47c101a114 | 32a0861a9559a71e99f13ad2c7f0c73c50f45a58 | refs/heads/master | 2020-05-29T11:46:30.187198 | 2016-06-15T20:34:59 | 2016-06-15T20:34:59 | 59,850,947 | 0 | 0 | null | 2016-10-18T18:58:22 | 2016-05-27T17:11:41 | Python | UTF-8 | Python | false | false | 1,610 | py |
import pytest
import os
from polyglotdb.io import inspect_orthography
from polyglotdb.exceptions import DelimiterError
from polyglotdb import CorpusContext
def test_load_spelling_no_ignore(graph_db, text_spelling_test_dir):
spelling_path = os.path.join(text_spelling_test_dir, 'text_spelling.txt')
parser = inspect_orthography(spelling_path)
with CorpusContext('spelling_no_ignore', **graph_db) as c:
c.reset()
c.load(parser, spelling_path)
#assert(c.lexicon['ab'].frequency == 2)
def test_load_spelling_directory(graph_db, text_spelling_test_dir):
parser = inspect_orthography(text_spelling_test_dir)
with CorpusContext('spelling_directory', **graph_db) as c:
c.load(parser, text_spelling_test_dir)
@pytest.mark.xfail
def test_export_spelling(graph_db, export_test_dir):
export_path = os.path.join(export_test_dir, 'export_spelling.txt')
with CorpusContext('spelling_no_ignore', **graph_db) as c:
export_discourse_spelling(c, 'text_spelling', export_path, words_per_line = 10)
with open(export_path,'r') as f:
assert(f.read() == 'ab cab\'d ad ab ab.')
def test_load_spelling_ignore(graph_db, text_spelling_test_dir):
spelling_path = os.path.join(text_spelling_test_dir, 'text_spelling.txt')
parser = inspect_orthography(spelling_path)
parser.annotation_types[0].ignored_characters = set(["'",'.'])
with CorpusContext('spelling_ignore', **graph_db) as c:
c.reset()
c.load(parser, spelling_path)
#assert(c.lexicon['ab'].frequency == 3)
#assert(c.lexicon['cabd'].frequency == 1)
| [
"michael.e.mcauliffe@gmail.com"
] | michael.e.mcauliffe@gmail.com |
760bb0083709a0b6ad7c947c2bafd496cd2f2af1 | 9cec93a18ea94504947820205d0faae4d67ecd8d | /TTHAnalysis/python/tools/jetReCleanerExamples.py | 9663fc4e48bd369bf0cb7cddbc13b81cc730777f | [] | no_license | DESY-CMS-SUS/cmgtools-lite | de88b1d5dc20a925ed5b7c7be69fa3ef677955c6 | db52d50047178563a0eb7f5858ae100aa408ec68 | refs/heads/8_0_25 | 2021-05-23T04:36:22.900460 | 2017-11-09T10:32:41 | 2017-11-09T10:32:41 | 60,184,794 | 3 | 9 | null | 2021-02-17T23:22:12 | 2016-06-01T14:37:18 | Python | UTF-8 | Python | false | false | 6,572 | py | from CMGTools.TTHAnalysis.treeReAnalyzer import Collection, deltaR
from CMGTools.TTHAnalysis.tools.collectionSkimmer import CollectionSkimmer
import ROOT, os
class JetReCleaner_base:
def __init__(self,label=""):
self.label = "" if (label in ["",None]) else ("_"+label)
self.vars = ("pt","eta","phi","mass","btagCSV")
self.branches = [ ("nJetGood"+self.label, "I") ]
self.branches += [ ("JetGood"+self.label+"_"+V, "F", 20, "nJetGood"+self.label) for V in self.vars ]
def init(self,tree):
pass
def listBranches(self):
return self.branches
class JetReCleaner(JetReCleaner_base):
"""Pure python version, using collections and objects (0.7 kHz with treeReAnalyzer, 3.1 kHz with treeReAnalyzer2)"""
def __init__(self,label=""):
JetReCleaner_base.__init__(self,label)
def __call__(self,event):
leps = [l for l in Collection(event,"LepGood")]
jets = [j for j in Collection(event,"Jet")]
cleanJets = [ j for j in jets if min(deltaR(j,l) for l in leps) > 0.4 ]
ret = { 'nJetGood'+self.label : len(cleanJets) }
for V in self.vars:
ret[ 'JetGood'+self.label+"_"+V ] = [getattr(j,V) for j in cleanJets]
return ret
class JetReCleaner_TreeReaders(JetReCleaner_base):
"""Python version using TreeReaderArray for input (runs at ~10 kHz)"""
def __init__(self,label=""):
JetReCleaner_base.__init__(self,label)
def init(self,tree):
self._ttreereaderversion = tree._ttreereaderversion
for B in "nLepGood", "nJet": setattr(self, B, tree.valueReader(B))
for B in "eta", "phi" : setattr(self,"LepGood_"+B, tree.arrayReader("LepGood_"+B))
for v in self.vars:
setattr(self,"Jet_"+v, tree.arrayReader("Jet_"+v))
def makeCleanJets(self,event):
leps = [ (self.LepGood_eta[i],self.LepGood_phi[i]) for i in xrange(self.nLepGood.Get()[0]) ]
jets = [ (i, self.Jet_eta[i], self.Jet_phi[i]) for i in xrange(self.nJet.Get()[0]) ]
cleanJets = []
for ij,je,jp in jets:
good = True
for le,lp in leps:
if abs(je-le)<0.4 and deltaR(je,jp,le,lp)<0.4:
good = False; break
if good: cleanJets.append(ij)
return cleanJets
def __call__(self,event):
## Init
if event._tree._ttreereaderversion > self._ttreereaderversion:
self.init(event._tree)
## Algo
cleanJets = self.makeCleanJets(event)
## Output (python)
ret = { 'nJetGood'+self.label : len(cleanJets) }
for V in self.vars:
branch = getattr(self, "Jet_"+V)
ret[ 'JetGood'+self.label+"_"+V ] = [branch[j] for j in cleanJets]
return ret
class JetReCleaner_CollectionSkimmer(JetReCleaner_TreeReaders):
"""Python version, using TreeReaderArray for input and CollectionSkimmer for output (runs at ~17 kHz)"""
def __init__(self,label=""):
JetReCleaner_TreeReaders.__init__(self,label)
self._helper = CollectionSkimmer("JetGood"+self.label, "Jet", floats=self.vars, maxSize=20)
self.branches = [] # output is done in C++
def init(self,tree):
self._helper.initInputTree(tree)
self.initReaders(tree)
def initReaders(self,tree):
for B in "nLepGood", "nJet": setattr(self, B, tree.valueReader(B))
for B in "eta", "phi" : setattr(self,"LepGood_"+B, tree.arrayReader("LepGood_"+B))
for B in "eta", "phi" : setattr(self,"Jet_"+B, tree.arrayReader("Jet_"+B))
def setOutputTree(self,pytree):
self._helper.initOutputTree(pytree);
def __call__(self,event):
## Init
if self._helper.initEvent(event):
self.initReaders(event._tree)
## Algo
cleanJets = self.makeCleanJets(event)
## Output
self._helper.push_back_all(cleanJets)
return {}
class JetReCleaner_CppHelper(JetReCleaner_CollectionSkimmer):
"""Version using a C++ worker, and CollectionSkimmer for output, called directly from C++ (runs at ~43 kHz)"""
def __init__(self,label=""):
JetReCleaner_CollectionSkimmer.__init__(self,label)
if "/jetReCleanerExampleHelper_cxx.so" not in ROOT.gSystem.GetLibraries():
print "Load C++ Worker"
ROOT.gROOT.ProcessLine(".L %s/src/CMGTools/TTHAnalysis/python/tools/jetReCleanerExampleHelper.cxx+" % os.environ['CMSSW_BASE'])
self._worker = ROOT.JetReCleanerExampleHelper(self._helper.cppImpl())
def init(self,tree):
JetReCleaner_CollectionSkimmer.init(self,tree)
self.initWorker()
def initWorker(self):
self._worker.setLeptons(self.nLepGood, self.LepGood_eta, self.LepGood_phi)
self._worker.setJets(self.nJet, self.Jet_eta, self.Jet_phi)
def __call__(self,event):
## Init
if self._helper.initEvent(event):
self.initReaders(event._tree)
self.initWorker()
## Algo + Output
self._worker.run()
return {}
class JetReCleaner_CppHelper2(JetReCleaner_CppHelper):
"""Version using a C++ worker, and CollectionSkimmer for output, connected via python (runs at ~35 kHz)"""
def __init__(self,label=""):
JetReCleaner_CollectionSkimmer.__init__(self,label)
if "/jetReCleanerExampleHelper2_cxx.so" not in ROOT.gSystem.GetLibraries():
print "Load C++ Worker"
ROOT.gROOT.ProcessLine(".L %s/src/CMGTools/TTHAnalysis/python/tools/jetReCleanerExampleHelper2.cxx+" % os.environ['CMSSW_BASE'])
self._worker = ROOT.JetReCleanerExampleHelper2()
def __call__(self,event):
## Init
if self._helper.initEvent(event):
self.initReaders(event._tree)
self.initWorker()
## Algo
cleanJets = self._worker.run()
## Output
self._helper.push_back(cleanJets) #push_back, since it's a std::vector and not a python list
return {}
MODULES = [
('py', lambda : JetReCleaner()),
('tr', lambda : JetReCleaner_TreeReaders()),
('cs', lambda : JetReCleaner_CollectionSkimmer()),
('cpp1', lambda : JetReCleaner_CppHelper()),
('cpp2', lambda : JetReCleaner_CppHelper2()),
# A second instance, to check no concurrency issues
('2py', lambda : JetReCleaner("Another")),
('2tr', lambda : JetReCleaner_TreeReaders("Another")),
('2cs', lambda : JetReCleaner_CollectionSkimmer("Another")),
('2cpp1', lambda : JetReCleaner_CppHelper("Another")),
('2cpp2', lambda : JetReCleaner_CppHelper2("Another")),
]
| [
"gpetruc@gmail.com"
] | gpetruc@gmail.com |
bf072896d6e400e7db2237ccba5491061b9060dd | b23e2df299d53589d0b59916e91dc1d88c8d5d84 | /example.py | 579c4b4de81f21a9950cd9136ad662ad3b8f96c8 | [] | no_license | codesharedot/credo-price | b8f188ad548ea0d1bfb3807505c2ee22199f730a | a769dd96a0063274e0f419b58aa61aa4f6f1f2fe | refs/heads/master | 2020-08-10T07:30:14.805961 | 2019-10-10T22:05:37 | 2019-10-10T22:05:37 | 214,293,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | import requests
import json
from forex_python.converter import CurrencyRates
import os
c = CurrencyRates()
rate = c.get_rate('USD', 'EUR')
print(rate)
credo_api_url = 'https://api.coinmarketcap.com/v1/ticker/credo/'
response = requests.get(credo_api_url)
response_json = response.json()
print(response_json)
for coin in response.json():
price = coin.get("price_usd", "U$S Price not provided")
coin_price = float(("{0:.2f}").format(float(price)))
print("$ " + str(coin_price))
coin_price_eur = float(("{0:.2f}").format(float(price)*rate))
print("€ " + str(coin_price_eur))
| [
"codeto@sent.com"
] | codeto@sent.com |
beb40b18da0dd26a06fdcaa474f27fbdffa1f650 | 2d0e5f5c6dd2e44ecf4166c81caff17f39c0c638 | /매일 프로그래밍/20200202/solution.py | c811ec3f4bd5c04f08ba1cefd182e3e3630b7f58 | [] | no_license | limkeunhyeok/daily-coding | 17d120a9f499189be3250a501e73e312802508a9 | 960dad7758c99619da0a33c899d5d4d8d8ff524d | refs/heads/master | 2022-04-30T22:32:54.173400 | 2022-04-16T13:15:49 | 2022-04-16T13:15:49 | 174,705,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | # 간격(interval)로 이루어진 배열이 주어지면, 겹치는 간격 원소들을 합친 새로운 배열을 만드시오.
# 간격은 시작과 끝으로 이루어져 있으며 시작은 끝보다 작거나 같습니다.
def solution(intervals):
answer = []
points = []
for arr in intervals:
points += list(range(arr[0], arr[1] + 1))
points = list(set(points))
temp = []
for index in range(len(points) - 1):
if not temp:
temp.append(points[index])
if points[index] - points[index + 1] != -1:
temp.append(points[index])
answer.append(temp)
temp = []
else:
continue
temp.append(points[-1])
answer.append(temp)
return answer | [
"gorloom6425@naver.com"
] | gorloom6425@naver.com |
cc2513911c76db904e5c9cbf077e379eda09f4d9 | 4626631c5e68a13ed4dde041212da39d344d74d9 | /hpOneView/storage.py | 5ceac397e94d09914f239dce4e48c7c8df3c5614 | [
"MIT"
] | permissive | xod442/python-hpOneView | a1482677e3252dabf1e14f9349c119428331089f | b78fb81cba34992bb84ed3814aae04ce05ef913f | refs/heads/master | 2021-01-18T05:53:42.466348 | 2015-08-11T15:59:16 | 2015-08-11T15:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,900 | py | # -*- coding: utf-8 -*-
"""
storage.py
~~~~~~~~~~~~
This module implements settings HP OneView REST API
"""
__title__ = 'storage'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2015) Hewlett Packard Enterprise ' \
' Development LP'
__license__ = 'MIT'
__status__ = 'Development'
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from hpOneView.common import *
from hpOneView.connection import *
from hpOneView.activity import *
from hpOneView.exceptions import *
class storage(object):
def __init__(self, con):
self._con = con
self._activity = activity(con)
def add_storage_system(self, host, user, passwd, blocking=True,
verbose=False):
request = {'ip_hostname': host,
'username': user,
'password': passwd}
task, body = self._con.post(uri['storage-systems'], request)
if blocking is True:
task = self._activity.wait4task(task, tout=600, verbose=verbose)
return body
def update_storage_system(self, StorageSystem, blocking=True,
verbose=False):
task, body = self._con.put(StorageSystem['uri'], StorageSystem)
if blocking is True:
task = self._activity.wait4task(task, tout=600, verbose=verbose)
return body
return task
def remove_storage_system(self, system, blocking=True, verbose=False):
task, body = self._con.delete(system['uri'])
if blocking is True:
task = self._activity.wait4task(task, tout=600, verbose=verbose)
return task
def get_storage_systems(self):
body = get_members(self._con.get(uri['storage-systems']))
return body
def get_storage_pools(self):
body = self._con.get(uri['storage-pools'])
return body
def add_storage_pool(self, name, storageSystemUri, blocking=True,
verbose=False):
request = {'storageSystemUri': storageSystemUri,
'poolName': name}
task, body = self._con.post(uri['storage-pools'], request)
if blocking is True:
task = self._activity.wait4task(task, tout=600, verbose=verbose)
if 'type' in task and task['type'].startswith('Task'):
entity = self._activity.get_task_associated_resource(task)
server = self._con.get(entity['resourceUri'])
return server
return task
# Temporarly modify the headers passed for POST and DELTE on storage volume
# templates in order to work around a bug. Without these headers the call
# cause a NullPointerException on the appliance and a 400 gets returned.
def add_storage_volume_template(self, volTemplate, verbose=False):
ori_headers = self._con._headers
self._con._headers.update({'Accept-Language': 'en'})
self._con._headers.update({'Accept-Encoding': 'deflate'})
task, body = self._con.post(uri['vol-templates'], volTemplate)
self._con._headers = ori_headers
return body
# Temporarly modify the headers passed for POST and DELTE on storage volume
# templates in order to work around a bug. Without these headers the call
# cause a NullPointerException on the appliance and a 400 gets returned.
def remove_storage_volume_template(self, volTemplate, blocking=True,
verbose=False):
ori_headers = self._con._headers
self._con._headers.update({'Accept-Language': 'en'})
task, body = self._con.delete(volTemplate['uri'])
self._con._headers = ori_headers
if blocking is True:
task = self._activity.wait4task(task, tout=600, verbose=verbose)
return body
return task
def get_attachable_volumes(self):
body = self._con.get(uri['attachable-volumes'])
return body
def get_storage_volume_templates(self):
body = self._con.get(uri['vol-templates'])
return body
def get_connectable_storage_volume_templates(self):
body = self._con.get(uri['connectable-vol'])
return body
def add_storage_volume(self, volume, blocking=True, verbose=False):
task, body = self._con.post(uri['storage-volumes'], volume)
if blocking is True:
task = self._activity.wait4task(task, tout=600, verbose=verbose)
if 'type' in task and task['type'].startswith('Task'):
entity = self._activity.get_task_associated_resource(task)
volume = self._con.get(entity['resourceUri'])
return volume
return task
def remove_storage_volume(self, volume, blocking=True,
verbose=False):
task, body = self._con.delete(volume['uri'])
if blocking is True:
task = self._activity.wait4task(task, tout=600, verbose=verbose)
return task
def copy_storage_volume(self, vol, dest_name, blocking=True,
verbose=False):
volume = make_storage_volume(dest_name,
vol['provisionedCapacity'],
vol['shareable'],
vol['storagePoolUri'],
vol['description'],
vol['provisionType'])
ret = self.add_storage_volume(volume, blocking, verbose)
return ret
# TODO remove the evil use/hack of the large count defaul once the
# OneView appliance honors -1 as a valid count vaule
def get_storage_volumes(self):
body = self._con.get(uri['storage-volumes'] + '?start=0&count=999999')
return body
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| [
"troy@debdev.org"
] | troy@debdev.org |
86a21abfcda4b1aaa69a2fc37b412c0239b73a43 | 096ccaca86872b03a137edf58221413073d770cb | /spiders/tesdorpf.py | 0cd1a0fee30bba84725730ce4c2025df1816e3f9 | [] | no_license | DH-heima/webscrapping | f142962b50deed2628052dd7a48098a4afbcbada | 1dc8f81f45db0d4366391c3052c5ab36f4d4bc5d | refs/heads/master | 2022-02-02T23:26:22.520064 | 2019-06-13T13:38:10 | 2019-06-13T13:38:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,593 | py | from io import BytesIO
from lxml import etree
parser = etree.HTMLParser()
from urllib.parse import quote_plus
import requests_cache, imghdr
from validators import validate_raw_files
from create_csvs import create_csvs
from ers import all_keywords_de as keywords, mh_brands
from matcher import BrandMatcher
from ers import COLLECTION_DATE, file_hash, img_path_namer, fpath_namer, headers
import shutil
from helpers.random_user_agent import randomua
import requests
# Init variables and assets
shop_id = 'tesdorpf'
root_url = 'https://www.tesdorpf.de'
session = requests_cache.CachedSession(fpath_namer(shop_id, 'requests_cache'))
session.headers = {'User-Agent': randomua()}
country = 'DE'
searches, categories, products = {}, {}, {}
from parse import parse
def getprice(pricestr):
if pricestr.startswith('ab '):
pricestr = pricestr[3:]
if not pricestr:
return
price = parse('{pound:d} €', pricestr)
if price:
return price.named['pound'] * 100
price = parse('{pound:d},{pence:d} €', pricestr)
if price:
return price.named['pound'] * 100 + price.named['pence']
price = parse('{th:d}.{pound:d} €', pricestr)
if price:
return price.named['th'] * 100000 + price.named['pound'] * 100
price = parse('{th:d}.{pound:d},{pence:d} €', pricestr)
if price:
return price.named['th'] * 100000 + price.named['pound'] * 100 + price.named['pence']
print('pb price', pricestr)
raise Exception
categories_urls = {
'champagne': 'https://www.tesdorpf.de/sortiment/franzoesische-weine/champagne?page={page}',
'cognac': 'https://www.tesdorpf.de/suche?term=cognac&page={page}',
# 'sparkling': '',
# 'vodka': '',
'whisky': 'https://www.tesdorpf.de/suche?term=whisky&page={page}',
'still_wines': 'http://www.totalwine.com/wine/c/c0020?viewall=true&pagesize=100&page={page}'
}
def getproduct(a):
data = {
'url': a.xpath('.//div[@class="product-info-title"]/a/@href')[0],
'pdct_name_on_eretailer': a.xpath('.//div[@class="product-info-title"]/a/text()')[0].strip(),
'price': getprice(a.xpath('.//div[contains(@class, "product-info-price-current")]/text()')[0]),
'img': a.xpath('.//div/a/img/@src')[0]
}
assert data['price']
products[data['url']] = data
for cat, url in categories_urls.items():
categories[cat] = []
for page in range(1, 100):
r = session.get(url.format(page=page))
tree = etree.parse(BytesIO(r.content), parser=parser)
articles = tree.xpath('//li/div[@class="product-grid-item-inset"]')
aurls = [a.xpath('.//div[@class="product-info-title"]/a/@href')[0] for a in articles]
if not articles or all(a in categories[cat] for a in aurls):
break
print(cat, len(articles), len(categories[cat]))
categories[cat] += aurls
[getproduct(a) for a in articles]
for kw in keywords:
searches[kw] = []
for page in range(1, 10):
r = session.get('https://www.tesdorpf.de/suche?term={kw}&page={page}'.format(
page=page, kw=quote_plus(kw)))
tree = etree.parse(BytesIO(r.content), parser=parser)
articles = tree.xpath('//li/div[@class="product-grid-item-inset"]')
aurls = [a.xpath('.//div[@class="product-info-title"]/a/@href')[0] for a in articles]
if not articles or all(a in searches[kw] for a in aurls):
break
searches[kw] += aurls
[getproduct(a) for a in articles]
print(kw, len(articles), len(searches[kw]))
brm = BrandMatcher()
for url, product in products.items():
if brm.find_brand(product['pdct_name_on_eretailer'])['brand'] in mh_brands:
r = session.get('https://www.tesdorpf.de' + url)
print("IMAGE", 'https://www.tesdorpf.de' + url, '/tmp/' + shop_id + ' ' + product['pdct_name_on_eretailer'].replace('/', "-") + '.html')
with open('/tmp/' + shop_id + ' ' + product['pdct_name_on_eretailer'].replace('/', "-") + '.html', 'wb') as f:
f.write(r.content)
tree = etree.parse(BytesIO(r.content), parser=parser)
data = {
'pdct_img_main_url': 'https://www.tesdorpf.de' + "".join(tree.xpath('//img[@itemprop="image"]/@src')[:1]),
}
product.update(data)
# Download images
for url, pdt in products.items():
if 'pdct_img_main_url' in pdt and pdt['pdct_img_main_url'] and brm.find_brand(pdt['pdct_name_on_eretailer'])['brand'] in mh_brands:
print(pdt['pdct_name_on_eretailer'] + "." + pdt['pdct_img_main_url'].split('.')[-1])
response = requests.get(pdt['pdct_img_main_url'], stream=True, verify=False, headers=headers)
# response.raw.decode_content = True
tmp_file_path = '/tmp/' + shop_id + 'mhers_tmp_{}.imgtype'.format(abs(hash(pdt['pdct_img_main_url'])))
img_path = img_path_namer(shop_id, pdt['pdct_name_on_eretailer'])
with open(tmp_file_path, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
if imghdr.what(tmp_file_path) is not None:
img_path = img_path.split('.')[0] + '.' + imghdr.what('/tmp/' + shop_id + 'mhers_tmp_{}.imgtype'.format(abs(hash(pdt['pdct_img_main_url']))))
shutil.copyfile('/tmp/' + shop_id + 'mhers_tmp_{}.imgtype'.format(abs(hash(pdt['pdct_img_main_url']))), img_path)
products[url].update({'img_path': img_path, 'img_hash': file_hash(img_path)})
create_csvs(products, categories, searches, shop_id, fpath_namer(shop_id, 'raw_csv'), COLLECTION_DATE)
validate_raw_files(fpath_namer(shop_id, 'raw_csv')) | [
"pierre.chevalier@epitech.eu"
] | pierre.chevalier@epitech.eu |
d171d3418596e59a8f1566918c36da27762cdad7 | 6c7c008a4626fc77a408ab0ae9ea34d858bfc251 | /trails/feeds/pony.py | bbb5008a22e18572517263332a5fdc9e54d83c8f | [
"MIT"
] | permissive | silverheartshafiq/maltrail | e53ff19c6674be98f3d8300d6e99837fb83a420c | 819219527181ce4a85791156e80d10016cd6c867 | refs/heads/master | 2020-04-24T08:43:07.316096 | 2019-02-21T09:18:30 | 2019-02-21T09:18:30 | 171,839,662 | 0 | 0 | MIT | 2019-02-21T09:18:29 | 2019-02-21T09:18:28 | null | UTF-8 | Python | false | false | 742 | py | #!/usr/bin/env python
"""
Copyright (c) 2014-2019 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import re
from core.common import retrieve_content
__url__ = "https://cybercrime-tracker.net/ccpmgate.php"
__check__ = "/gate.php"
__info__ = "pony (malware)"
__reference__ = "cybercrime-tracker.net"
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#'):
continue
if '://' in line:
line = re.search(r"://(.*)", line).group(1)
retval[line] = (__info__, __reference__)
return retval
| [
"miroslav.stampar@gmail.com"
] | miroslav.stampar@gmail.com |
825d744cc2b816c49caa2b0886ac625db04ac255 | 56cce3fee2e3d69d60958eb2aacc4f65fc3d2230 | /src/pybgl/product_mixin.py | 8be617c745bc114bbbcdacddbdfce9c425088f28 | [
"BSD-3-Clause"
] | permissive | nokia/PyBGL | 52c2f175d1dbccb15519f8a16de141845d0abaf3 | 707f2df32ede7d9a992ea217a4791da34f13e138 | refs/heads/master | 2023-08-08T04:46:24.931627 | 2023-08-03T16:31:35 | 2023-08-03T16:31:35 | 148,536,169 | 12 | 3 | BSD-3-Clause | 2023-08-03T16:31:36 | 2018-09-12T20:11:36 | Python | UTF-8 | Python | false | false | 1,805 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the pybgl project.
# https://github.com/nokia/pybgl
from pybgl.automaton import BOTTOM, Automaton, EdgeDescriptor
class ProductMixin:
def __init__(self, g12: Automaton, operator):
self.map_product_vertices = dict()
self.g12 = g12
self.operator = operator
def add_product_vertex(self, q1: int, g1: Automaton, q2: int, g2: Automaton) -> int:
q12 = self.g12.add_vertex()
if self.operator(g1.is_initial(q1), g2.is_initial(q2)):
self.g12.set_initial(q12)
if self.operator(g1.is_final(q1), g2.is_final(q2)):
self.g12.set_final(q12)
self.map_product_vertices[(q1, q2)] = q12
return q12
def add_product_edge(self, e1: EdgeDescriptor, g1: Automaton, e2: EdgeDescriptor, g2: Automaton):
if e1:
q1 = g1.source(e1)
r1 = g1.target(e1)
a = g1.label(e1)
else:
q1 = r1 = BOTTOM
if e2:
q2 = g2.source(e2)
r2 = g2.target(e2)
a = g2.label(e2)
else:
q2 = r2 = BOTTOM
q12 = self.get_or_create_product_vertex(q1, g1, q2, g2)
r12 = self.get_or_create_product_vertex(r1, g1, r2, g2)
return self.g12.add_edge(q12, r12, a)
def get_product_vertex(self, q1: int, q2: int) -> int:
return self.map_product_vertices.get((q1, q2))
def get_or_create_product_vertex(self, q1: int, g1: Automaton, q2: int, g2: Automaton) -> int:
if q1 is BOTTOM and q2 is BOTTOM:
raise RuntimeError("Tried to create (BOTTOM, BOTTOM) state.")
q12 = self.get_product_vertex(q1, q2)
if q12 is None:
q12 = self.add_product_vertex(q1, g1, q2, g2)
return q12
| [
"marc-olivier.buob@nokia-bell-labs.com"
] | marc-olivier.buob@nokia-bell-labs.com |
10dae104503fc05dbc42ea9b1f06e0942c8562a5 | bea82dcebdee1feddcb0b45e00db99ba208e45fb | /practice/14random_walks_and_more_about_data_visualization/birthday_problem.py | 711e3a9ea14b31daca05334c21c618d80061e327 | [] | no_license | LordBao666/MITLecture6.0002_introduction_to_computational_thinking_and_data_science | 56f1916b3eb148b63dc5e4103d28a401c5ca790f | f368c85e784dfa249bbec90983c68601e4516aa0 | refs/heads/master | 2023-03-30T16:51:59.153885 | 2021-04-06T15:16:52 | 2021-04-06T15:16:52 | 348,566,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,295 | py | """
@Author : Lord_Bao
@Date : 2021/3/20
"""
import random
def same_date(num_people, num_same):
    """Simulate ``num_people`` uniformly random birthdays over a 366-day
    (leap) year and report whether any single day collected at least
    ``num_same`` of them.

    :param num_people: number of people whose birthdays are simulated.
    :param num_same: threshold of shared birthdays to test for.
    :return: True when the most popular birthday is shared by at least
        ``num_same`` people, else False.
    """
    per_day = [0] * 366
    for _ in range(num_people):
        # Each person draws one day uniformly from the 366 possibilities.
        per_day[random.choice(range(366))] += 1
    return max(per_day) >= num_same
def birthday_prob(num_people, num_same, num_trials):
    """Monte-Carlo estimate of the probability that, among ``num_people``
    random birthdays, at least ``num_same`` fall on the same day.

    :param num_people: number of people per simulated trial.
    :param num_same: threshold of shared birthdays counted as a hit.
    :param num_trials: number of independent ``same_date`` simulations;
        the estimate gets better as this grows.
    :return: fraction of trials that reported a hit.
    """
    hits = sum(1 for _ in range(num_trials)
               if same_date(num_people, num_same))
    return hits / num_trials
def same_date_with_different_weight(num_people, num_same):
    """Weighted variant of ``same_date``: birthdays are not uniform.

    Day indices are 0-based positions in a leap year: Jan 1 -> 0, ...,
    Feb 28 -> 58, Feb 29 -> 59, Mar 1 -> 60, ..., Dec 31 -> 365.  Over a
    four-year cycle each ordinary day occurs four times but Feb 29 only
    once, so ordinary days get weight 4 and Feb 29 weight 1.  Days
    180-269 (late June through September, the observed birth-rate peak;
    cf. MIT 6.0002 Lecture 4 / common_birthday.png) are added once more
    with weight 4.

    :param num_people: number of people whose birthdays are simulated.
    :param num_same: threshold of shared birthdays to test for.
    :return: True when some day is shared by at least ``num_same`` people.
    """
    # NOTE: the previous weighting (4*range(0, 57) + [58] + 4*range(59, 366))
    # left day 57 unreachable and singled out index 58 (Feb 28) instead of
    # index 59 (Feb 29); fixed here to match the intent described above.
    possible_date = 4 * list(range(0, 59)) + [59] \
                    + 4 * list(range(60, 366)) \
                    + 4 * list(range(180, 270))
    birthdays = [0] * 366
    for _ in range(num_people):
        # Duplicates in possible_date implement the per-day weights.
        day = random.choice(possible_date)
        birthdays[day] += 1
    return max(birthdays) >= num_same
def birthday_prob_with_different_weight(num_people, num_same, num_trials):
    """Like ``birthday_prob`` but using the non-uniform birthday
    distribution of ``same_date_with_different_weight``.

    :return: fraction of ``num_trials`` simulations in which at least
        ``num_same`` of the ``num_people`` birthdays coincided.
    """
    hits = sum(1 for _ in range(num_trials)
               if same_date_with_different_weight(num_people, num_same))
    return hits / num_trials
| [
"916900021@qq.com"
] | 916900021@qq.com |
65eacfa5fd125f2d84127ac7eec2389c25243da8 | a5c094ebfd36e4b43b9d4a2f61b5a37812a1ff03 | /src/gym_selfx/render/draw.py | f7eb6e9870586c3a8fc3dd34ee8247899a75163b | [
"MIT"
] | permissive | mountain/selfx | 47d737baaa639f4ff001ac1f1715bcd67ab7f495 | 939fd4ed83d06d26aec15b606429fd75598cef80 | refs/heads/master | 2023-04-07T09:37:42.432853 | 2022-12-10T00:05:57 | 2022-12-10T00:05:57 | 191,362,875 | 7 | 3 | MIT | 2023-08-16T11:28:25 | 2019-06-11T12:00:47 | Python | UTF-8 | Python | false | false | 5,802 | py | # -*- coding: utf-8 -*-
#
# Python version Copyright (c) 2015 John Stowers
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import cv2
import random
import numpy as np
from Box2D import (b2Color, b2DistanceJoint, b2MouseJoint, b2PulleyJoint)
from Box2D.Box2D import (b2_staticBody as staticBody, b2_dynamicBody as dynamicBody, b2_kinematicBody as kinematicBody, b2PolygonShape as polygonShape,
b2CircleShape as circleShape, b2LoopShape as loopShape, b2EdgeShape as edgeShape)
import matplotlib.pyplot as plt
def cvcolor(color):
    """Convert an RGB float triple (components in [0, 1]) to the BGR
    integer triple (components in [0, 255]) that OpenCV expects."""
    blue, green, red = color[2], color[1], color[0]
    return (int(255.0 * blue), int(255.0 * green), int(255.0 * red))
def cvcoord(pos):
    """Truncate the components of a coordinate pair to an integer tuple
    suitable for OpenCV pixel positions."""
    return tuple(int(component) for component in pos)
class OpencvDrawFuncs(object):
    """Renders Box2D bodies, fixtures and joints onto a numpy/OpenCV image.

    ``install()`` monkey-patches ``draw`` methods onto the Box2D shape
    classes so that ``draw_world`` can dispatch per fixture.  All Box2D
    coordinates (meters) are scaled by ``ppm`` (pixels per meter) before
    drawing; colour tuples follow OpenCV's (B, G, R) order.
    """
    def __init__(self, w, h, ppm, fill_polygon=True, flip_y=True):
        # w/h: canvas size in pixels; ppm: pixels-per-meter scale factor.
        self._w = w
        self._h = h
        self._ppm = ppm
        # Outline colour per Box2D body type (circles use body.userData
        # instead, see _draw_circle).
        self._colors = {
            staticBody: (255, 255, 255),
            dynamicBody: (255, 0, 0),
            kinematicBody: (127, 255, 230),
        }
        self._fill_polygon = fill_polygon
        # flip_y: convert Box2D's y-up coordinates to image row-down ones.
        self._flip_y = flip_y
        # uint8 canvas; note numpy uses (rows, cols) = (h, w) ordering.
        self.screen = np.zeros((self._h, self._w, 3), np.uint8)
    def install(self):
        """Monkey-patch draw() onto the Box2D shape classes (enables the
        fixture.shape.draw(...) dispatch used by draw_world)."""
        polygonShape.draw = self._draw_polygon
        circleShape.draw = self._draw_circle
        loopShape.draw = self._draw_loop
        edgeShape.draw = self._draw_edge
    def draw_world(self, world):
        """Draw every fixture of every body, then every joint, of `world`."""
        for body in world.bodies:
            for fixture in body.fixtures:
                fixture.shape.draw(body, fixture)
        for joint in world.joints:
            self._draw_joint(joint)
    def clear_screen(self, screen=None):
        """Reset the canvas to black, or adopt `screen` as the new canvas."""
        if screen is None:
            self.screen.fill(0)
        else:
            self.screen = screen
    def _fix_vertices(self, vertices):
        """Mirror y coordinates into image space when flip_y is enabled."""
        if self._flip_y:
            return [(v[0], self._h - v[1]) for v in vertices]
        else:
            return [(v[0], v[1]) for v in vertices]
    def _draw_joint(self, joint):
        """Draw a joint as line segments between its bodies and anchors."""
        bodyA, bodyB = joint.bodyA, joint.bodyB
        xf1, xf2 = bodyA.transform, bodyB.transform
        # x1/x2: body origins; p1/p2: the joint's anchor points.
        x1, x2 = xf1.position, xf2.position
        p1, p2 = joint.anchorA, joint.anchorB
        color = b2Color(0.5, 0.8, 0.8)
        x1, x2, p1, p2 = self._fix_vertices((x1 * self._ppm, x2 * self._ppm,
                                             p1 * self._ppm, p2 * self._ppm))
        if isinstance(joint, b2DistanceJoint):
            # Distance joint: a single segment between the two anchors.
            cv2.line(self.screen, cvcoord(p1), cvcoord(p2), cvcolor(color), 1)
        elif isinstance(joint, b2PulleyJoint):
            # Pulley: anchor-to-ground-anchor ropes plus the ground segment.
            s1, s2 = joint.groundAnchorA, joint.groundAnchorB
            s1, s2 = self._fix_vertices((s1 * self._ppm, s2 * self._ppm))
            cv2.line(self.screen, cvcoord(s1), cvcoord(p1), cvcolor(color), 1)
            cv2.line(self.screen, cvcoord(s2), cvcoord(p2), cvcolor(color), 1)
            cv2.line(self.screen, cvcoord(s1), cvcoord(s2), cvcolor(color), 1)
        elif isinstance(joint, b2MouseJoint):
            pass  # don't draw it here
        else:
            # Generic joint: body origin -> anchor -> anchor -> body origin.
            cv2.line(self.screen, cvcoord(x1), cvcoord(p1), cvcolor(color), 1)
            cv2.line(self.screen, cvcoord(p1), cvcoord(p2), cvcolor(color), 1)
            cv2.line(self.screen, cvcoord(x2), cvcoord(p2), cvcolor(color), 1)
    def _draw_polygon(self, body, fixture):
        """Draw a polygon fixture as a closed polyline, optionally filled."""
        polygon = fixture.shape
        transform = body.transform
        vertices = self._fix_vertices([transform * v * self._ppm
                                       for v in polygon.vertices])
        pts = np.array(vertices, np.int32)
        # cv2.polylines/fillPoly expect an (N, 1, 2) int32 point array.
        pts = pts.reshape((-1, 1, 2))
        cv2.polylines(self.screen, [pts], True, self._colors[body.type])
        if self._fill_polygon:
            # Fill with the body colour dimmed to 50%.
            lightc = np.array(self._colors[body.type], dtype=int) * 0.5
            cv2.fillPoly(self.screen, [pts], lightc)
    def _draw_circle(self, body, fixture):
        """Draw a circle fixture; thickness -1 means filled in OpenCV.

        NOTE(review): colour comes from body.userData['color'] rather than
        the type palette — assumes callers always attach a dict with a
        'color' key as userData; confirm.
        """
        circle = fixture.shape
        position = self._fix_vertices(
            [body.transform * circle.pos * self._ppm])[0]
        if self._fill_polygon:
            cv2.circle(self.screen, cvcoord(position), int(
                circle.radius * self._ppm), body.userData['color'], -1)
        else:
            cv2.circle(self.screen, cvcoord(position), int(
                circle.radius * self._ppm), body.userData['color'], 1)
    def _draw_edge(self, body, fixture):
        """Draw an edge fixture as a single line segment."""
        edge = fixture.shape
        v = [body.transform * edge.vertex1 * self._ppm,
             body.transform * edge.vertex2 * self._ppm]
        vertices = self._fix_vertices(v)
        cv2.line(self.screen, cvcoord(vertices[0]),
                 cvcoord(vertices[1]), self._colors[body.type], 1)
    def _draw_loop(self, body, fixture):
        """Draw a loop fixture as a closed chain of line segments."""
        loop = fixture.shape
        transform = body.transform
        vertices = self._fix_vertices([transform * v * self._ppm
                                       for v in loop.vertices])
        # Start from the last vertex so the chain closes on itself.
        v1 = vertices[-1]
        for v2 in vertices:
            cv2.line(self.screen, cvcoord(v1), cvcoord(v2),
                     self._colors[body.type], 1)
            v1 = v2
| [
"mingli.yuan@gmail.com"
] | mingli.yuan@gmail.com |
fcca4b3d647d353de230a11c03610d03db3b31c4 | 4a06e92024030a0cee216825bee6d659e589f944 | /hive/indexer/blocks.py | 71aa9bb9544db57643c167fb9f51e3e5304e1587 | [
"MIT"
] | permissive | tiotdev/hivemind | ba99962e11984715c2d0d4933275c40703071f95 | 4765989650b743fbb43ed02afc07c83e8f4a15ca | refs/heads/master | 2020-04-21T07:13:33.333666 | 2019-03-27T15:24:39 | 2019-03-27T15:24:39 | 169,387,340 | 0 | 0 | MIT | 2020-04-05T17:39:16 | 2019-02-06T10:17:37 | Python | UTF-8 | Python | false | false | 8,144 | py | """Blocks processor."""
import logging
from hive.db.adapter import Db
from hive.indexer.accounts import Accounts
from hive.indexer.posts import Posts
from hive.indexer.cached_post import CachedPost
from hive.indexer.custom_op import CustomOp
from hive.indexer.payments import Payments
from hive.indexer.follow import Follow
# Module-level logger and the process-wide singleton DB adapter shared by
# all Blocks classmethods below.
log = logging.getLogger(__name__)
DB = Db.instance()
class Blocks:
    """Processes blocks, dispatches work, manages `hive_blocks` table."""
    @classmethod
    def head_num(cls):
        """Get hive's head block number."""
        # Returns 0 when hive_blocks is empty (query_one yields None).
        sql = "SELECT num FROM hive_blocks ORDER BY num DESC LIMIT 1"
        return DB.query_one(sql) or 0
    @classmethod
    def head_date(cls):
        """Get hive's head block date (empty string when no blocks yet)."""
        sql = "SELECT created_at FROM hive_blocks ORDER BY num DESC LIMIT 1"
        return str(DB.query_one(sql) or '')
    @classmethod
    def process(cls, block):
        """Process a single block. Always wrap in a transaction!"""
        #assert is_trx_active(), "Block.process must be in a trx"
        return cls._process(block, is_initial_sync=False)
    @classmethod
    def process_multi(cls, blocks, is_initial_sync=False):
        """Batch-process blocks; wrapped in a transaction."""
        DB.query("START TRANSACTION")
        last_num = 0
        try:
            for block in blocks:
                last_num = cls._process(block, is_initial_sync)
        except Exception as e:
            # last_num + 1 is the block we were about to (or failed to) index.
            # NOTE(review): the open transaction is re-raised without an
            # explicit ROLLBACK here — presumably handled by the caller;
            # confirm.
            log.error("exception encountered block %d", last_num + 1)
            raise e
        # Follows flushing needs to be atomic because recounts are
        # expensive. So is tracking follows at all; hence we track
        # deltas in memory and update follow/er counts in bulk.
        Follow.flush(trx=False)
        DB.query("COMMIT")
    @classmethod
    def _process(cls, block, is_initial_sync=False):
        """Process a single block. Assumes a trx is open.

        Dispatches every operation in the block by type, batching account
        registrations and post/custom-json ops so they can be applied in
        bulk after the scan.  Returns the block number.
        """
        # _push inserts the hive_blocks row and derives the block number.
        num = cls._push(block)
        date = block['timestamp']
        account_names = set()
        comment_ops = []
        json_ops = []
        delete_ops = []
        for tx_idx, tx in enumerate(block['transactions']):
            for operation in tx['operations']:
                op_type = operation['type']
                op = operation['value']
                # account ops
                if op_type == 'pow_operation':
                    account_names.add(op['worker_account'])
                elif op_type == 'pow2_operation':
                    account_names.add(op['work']['value']['input']['worker_account'])
                elif op_type == 'account_create_operation':
                    account_names.add(op['new_account_name'])
                elif op_type == 'account_create_with_delegation_operation':
                    account_names.add(op['new_account_name'])
                elif op_type == 'create_claimed_account_operation':
                    account_names.add(op['new_account_name'])
                # post ops
                elif op_type == 'comment_operation':
                    comment_ops.append(op)
                elif op_type == 'delete_comment_operation':
                    delete_ops.append(op)
                elif op_type == 'vote_operation':
                    # Votes only touch the cache; skipped during initial
                    # sync when the cache is rebuilt later anyway.
                    if not is_initial_sync:
                        CachedPost.vote(op['author'], op['permlink'])
                # misc ops
                elif op_type == 'transfer_operation':
                    Payments.op_transfer(op, tx_idx, num, date)
                elif op_type == 'custom_json_operation':
                    json_ops.append(op)
        Accounts.register(account_names, date) # register any new names
        Posts.comment_ops(comment_ops, date) # handle inserts, edits
        Posts.delete_ops(delete_ops) # handle post deletion
        CustomOp.process_ops(json_ops, num, date) # follow/reblog/community ops
        return num
    @classmethod
    def verify_head(cls, steem):
        """Perform a fork recovery check on startup.

        Walks backwards from hive's head comparing block hashes against
        steem's until they agree, then pops the mismatched blocks.
        """
        hive_head = cls.head_num()
        if not hive_head:
            return
        # move backwards from head until hive/steem agree
        to_pop = []
        cursor = hive_head
        while True:
            assert hive_head - cursor < 25, "fork too deep"
            hive_block = cls._get(cursor)
            steem_hash = steem.get_block(cursor)['block_id']
            match = hive_block['hash'] == steem_hash
            log.info("[INIT] fork check. block %d: %s vs %s --- %s",
                     hive_block['num'], hive_block['hash'],
                     steem_hash, 'ok' if match else 'invalid')
            if match:
                break
            to_pop.append(hive_block)
            cursor -= 1
        if hive_head == cursor:
            return # no fork!
        log.error("[FORK] depth is %d; popping blocks %d - %d",
                  hive_head - cursor, cursor + 1, hive_head)
        # we should not attempt to recover from fork until it's safe
        fork_limit = steem.last_irreversible()
        assert cursor < fork_limit, "not proceeding until head is irreversible"
        cls._pop(to_pop)
    @classmethod
    def _get(cls, num):
        """Fetch a specific block as a dict with num/date/hash keys."""
        sql = """SELECT num, created_at date, hash
                 FROM hive_blocks WHERE num = :num LIMIT 1"""
        return dict(DB.query_row(sql, num=num))
    @classmethod
    def _push(cls, block):
        """Insert a row in `hive_blocks`."""
        # The block number is encoded in the first 8 hex chars (4 bytes)
        # of the block_id.
        num = int(block['block_id'][:8], base=16)
        txs = block['transactions']
        DB.query("INSERT INTO hive_blocks (num, hash, prev, txs, ops, created_at) "
                 "VALUES (:num, :hash, :prev, :txs, :ops, :date)", **{
                     'num': num,
                     'hash': block['block_id'],
                     'prev': block['previous'],
                     'txs': len(txs),
                     'ops': sum([len(tx['operations']) for tx in txs]),
                     'date': block['timestamp']})
        return num
    @classmethod
    def _pop(cls, blocks):
        """Pop head blocks to navigate head to a point prior to fork.

        Without an undo database, there is a limit to how fully we can recover.
        If consistency is critical, run hive with TRAIL_BLOCKS=-1 to only index
        up to last irreversible. Otherwise use TRAIL_BLOCKS=2 to stay closer
        while avoiding the vast majority of microforks.

        As-is, there are a few caveats with the following strategy:

         - follow counts can get out of sync (hive needs to force-recount)
         - follow state could get out of sync (user-recoverable)

        For 1.5, also need to handle:

         - hive_communities
         - hive_members
         - hive_flags
         - hive_modlog
        """
        DB.query("START TRANSACTION")
        for block in blocks:
            num = block['num']
            date = block['date']
            log.warning("[FORK] popping block %d @ %s", num, date)
            # Blocks must be popped newest-first, one at a time.
            assert num == cls.head_num(), "can only pop head block"
            # get all affected post_ids in this block
            sql = "SELECT id FROM hive_posts WHERE created_at >= :date"
            post_ids = tuple(DB.query_col(sql, date=date))
            # remove all recent records
            # NOTE: deletion order matters — cache/child rows referencing
            # posts are removed before hive_posts, and hive_blocks last.
            DB.query("DELETE FROM hive_posts_cache WHERE post_id IN :ids", ids=post_ids)
            DB.query("DELETE FROM hive_feed_cache WHERE created_at >= :date", date=date)
            DB.query("DELETE FROM hive_reblogs WHERE created_at >= :date", date=date)
            DB.query("DELETE FROM hive_follows WHERE created_at >= :date", date=date) #*
            DB.query("DELETE FROM hive_post_tags WHERE post_id IN :ids", ids=post_ids)
            DB.query("DELETE FROM hive_posts WHERE id IN :ids", ids=post_ids)
            DB.query("DELETE FROM hive_payments WHERE block_num = :num", num=num)
            DB.query("DELETE FROM hive_blocks WHERE num = :num", num=num)
        DB.query("COMMIT")
        log.warning("[FORK] recovery complete")
        # TODO: manually re-process here the blocks which were just popped.
| [
"roadscape@users.noreply.github.com"
] | roadscape@users.noreply.github.com |
9650b953bf48ff69844f4d505df00d9978a03a49 | 9c82dc938056000e4b88c3e05a851f379d2d3bed | /devel/lib/python2.7/dist-packages/learning_actionlib/msg/_FibonacciResult.py | fb1130eccc110ac0dd77fc13bf5fd7be6f24beaf | [] | no_license | LongfeiProjects/ROS_Catkin_WS | 971e65a49139c7f20869617da53b506ec9a996df | 6a3390fd531ce498af634b83f9df18b9aae33c8e | refs/heads/master | 2021-03-22T00:32:57.911668 | 2016-03-03T04:45:26 | 2016-03-03T04:45:26 | 45,441,951 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,799 | py | """autogenerated by genpy from learning_actionlib/FibonacciResult.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class FibonacciResult(genpy.Message):
  # Code below is generated by genpy from FibonacciResult.msg; it follows
  # the standard genpy message template (length-prefixed wire format).
  _md5sum = "b81e37d2a31925a0e8ae261a8699cb79"
  _type = "learning_actionlib/FibonacciResult"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# result definition
int32[] sequence
"""
  __slots__ = ['sequence']
  _slot_types = ['int32[]']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       sequence
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      # genpy.Message assigns the slots from args/kwds; fields left as
      # None are then backfilled with their defaults below.
      super(FibonacciResult, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.sequence is None:
        self.sequence = []
    else:
      self.sequence = []
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    # Wire format: uint32 element count, then little-endian int32 payload
    # ('<%si' expands to e.g. '<3i').
    try:
      length = len(self.sequence)
      buff.write(_struct_I.pack(length))
      pattern = '<%si'%length
      buff.write(struct.pack(pattern, *self.sequence))
    # NOTE(review): these generated handlers reference `_x`, which is never
    # bound in this class — a genpy template artifact; if they ever fire
    # they would raise NameError. Left as generated.
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # (Generated code: the `str` parameter shadows the builtin.)
    try:
      end = 0
      # Read the 4-byte length prefix, then the int32 payload.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%si'%length
      start = end
      end += struct.calcsize(pattern)
      self.sequence = struct.unpack(pattern, str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # Same wire format as serialize(), but the payload comes from the
    # numpy array's raw bytes.
    try:
      length = len(self.sequence)
      buff.write(_struct_I.pack(length))
      pattern = '<%si'%length
      buff.write(self.sequence.tostring())
    # NOTE(review): same unbound `_x` template artifact as in serialize().
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%si'%length
      start = end
      end += struct.calcsize(pattern)
      # Zero-copy view of the payload as little-endian int32s.
      self.sequence = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Shared Struct used for the 4-byte length prefix in the (de)serializers above.
_struct_I = genpy.struct_I
| [
"longfei.zhao@yahoo.com"
] | longfei.zhao@yahoo.com |
e7a6b61d69f6efde1bd4318a3138e41f0bacee73 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_sperm.py | 690578bfe35a6c4408888820bf2e97a004fda804 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py |
#calss header
class _SPERM():
def __init__(self,):
self.name = "SPERM"
self.definitions = [u'a sex cell produced by a man or male animal: ', u'informal for semen (= the liquid produced by the male sex organs that contains sperm)']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e7d3461b5fae2b11cf3afd291d811480b48bf0aa | f7583188b39f53b9d3661912c10e64e70e80b3a8 | /quant-stuff/gs-quant-master/gs_quant/analytics/processors/analysis_processors.py | 86e8f772d7b1ecdb72983ba6a8d2fde0e948a593 | [
"Apache-2.0"
] | permissive | masa4u/downloads-archive-3 | 0e96edb8b8e78e37258f896b833a658ec70e1ccb | b90f3ea87fca28f0f035122994e0bff2341df0fa | refs/heads/master | 2023-04-27T22:11:33.349908 | 2021-05-10T05:05:07 | 2021-05-10T05:05:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,276 | py | """
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from typing import Optional
from gs_quant.analytics.core.processor import BaseProcessor, DataCoordinateOrProcessor, DateOrDatetimeOrRDate
from gs_quant.analytics.core.processor_result import ProcessorResult
from gs_quant.timeseries import diff
class DiffProcessor(BaseProcessor):
    def __init__(self,
                 a: DataCoordinateOrProcessor,
                 *,
                 obs: int = 1,
                 start: Optional[DateOrDatetimeOrRDate] = None,
                 end: Optional[DateOrDatetimeOrRDate] = None):
        """ DiffProcessor

        Computes the difference of series values over a given lag:

        :math:`R_t = X_t - X_{t-obs}`

        :param a: DataCoordinate or BaseProcessor providing the series
        :param obs: number of observations to lag by
        :param start: start date or time used in the underlying data query
        :param end: end date or time used in the underlying data query
        """
        super().__init__()
        # The series to difference is registered as child 'a'; process()
        # later reads its result from self.children_data.
        self.children['a'] = a
        self.obs = obs
        self.start = start
        self.end = end

    def process(self):
        # Guard-clause style: bail out with a failure result unless the
        # child series has arrived and succeeded.
        a_data = self.children_data.get('a')
        if not isinstance(a_data, ProcessorResult):
            self.value = ProcessorResult(False, "DiffProcessor does not have 'a' series yet")
            return
        if not a_data.success:
            self.value = ProcessorResult(False, "DiffProcessor does not have 'a' series values yet")
            return
        self.value = ProcessorResult(True, diff(a_data.data, self.obs))

    def get_plot_expression(self):
        pass
| [
"bgoonz4212@gmail.com"
] | bgoonz4212@gmail.com |
d1c182d825bd932bd58bc399f49c88d47117f5e3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_monster.py | e42f32cbb962e49f1f74d9429ff41a7b784e1cc8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py |
#calss header
class _MONSTER():
def __init__(self,):
self.name = "MONSTER"
self.definitions = [u'very big: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
829acdd74d8983fb7d697b195815a539a258ac13 | 7d015f47e0d38f9fa11a34b13ed203b73cc2d2ba | /0x15-api/1-export_to_CSV.py | f82d49d6f075b2bb28a6cfa8a10f527df2f80dae | [] | no_license | eodenyire/holberton-system_engineering-devops-23 | c103bbf9268db05619666231ab49aa3ebb66ebcf | 89f02ea165322858921b66904a8245988b383674 | refs/heads/master | 2023-08-18T02:57:31.023569 | 2018-01-30T07:51:14 | 2018-01-30T07:51:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,241 | py | #!/usr/bin/python3
"""Script to get information from the TODO api endpoint as it
pertains to a particular employee identified by ID."""
import csv
import requests
import sys
# JSONPlaceholder demo-API endpoints queried by the helpers below.
todo_endpoint = "https://jsonplaceholder.typicode.com/todos"
user_endpoint = "https://jsonplaceholder.typicode.com/users"
def get_todos_by_userid(user_id):
    """Fetch the TODO list of the user identified by `user_id`.

    Exits the process with status 1 when the response body is not
    parseable as JSON.
    """
    response = requests.get(todo_endpoint, params={'userId': user_id})
    try:
        return response.json()
    except:
        exit(1)
def get_user_by_userid(user_id):
    """Fetch the user record (first match) for `user_id`.

    Exits the process with status 1 when the response is not parseable
    JSON or contains no matching user.
    """
    response = requests.get(user_endpoint, params={'id': user_id})
    try:
        return response.json()[0]
    except:
        exit(1)
def format_user_todos(user_id):
    """Build the "Employee X is done with tasks(done/total)" summary string,
    followed by one tab-indented line per completed task title."""
    todos = get_todos_by_userid(user_id)
    done = [task for task in todos if task.get('completed') is True]
    name = get_user_by_userid(user_id).get('name')
    header = "Employee {} is done with tasks({}/{}):\n\t".format(
        name, len(done), len(todos))
    return header + '\n\t'.join(task.get('title') for task in done)
def export_csv_user_todos(user_id):
    """Export the employee's TODO list to `<user_id>.csv` (all fields
    quoted, no header row)."""
    todos = get_todos_by_userid(user_id)
    username = get_user_by_userid(user_id).get('username')
    rows = [{'userId': user_id,
             'username': username,
             'completed': task.get('completed'),
             'title': task.get('title')}
            for task in todos]
    with open("{}.csv".format(user_id), 'w', newline="") as f:
        writer = csv.DictWriter(
            f,
            fieldnames=["userId", "username", "completed", "title"],
            quoting=csv.QUOTE_ALL)
        writer.writerows(rows)
if __name__ == "__main__":
    # CLI entry point: an employee id is required as the sole argument.
    if len(sys.argv) < 2:
        print("Usage: ./0-gather_data_from_an_API.py <employee_id>")
        exit(1)
    export_csv_user_todos(sys.argv[1])
| [
"andrew.birnberg@gmail.com"
] | andrew.birnberg@gmail.com |
ceb18c46e219ea7d400cdd5576594ab240fb515f | 4dcf41e11d13258fa240c849fe8ed81c36c761af | /cirq/aqt/aqt_device.py | 616c3f627d1ed0c10d13e5d28ac2c7b08746b29a | [
"Apache-2.0"
] | permissive | 1eedaegon/Cirq | 717920bdc4ee265ca5c39abc84d1b136c25d0343 | de0c5e855069bba71e55b070fc9b06f58c07a861 | refs/heads/master | 2020-08-28T14:21:01.531296 | 2019-10-25T22:59:12 | 2019-10-25T22:59:12 | 217,724,457 | 1 | 0 | Apache-2.0 | 2019-10-26T14:55:14 | 2019-10-26T14:55:14 | null | UTF-8 | Python | false | false | 7,078 | py | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Current device parameters for the AQT/UIBK ion trap device
The device is based on a linear calcium ion string with
arbitrary connectivity. For more information see:
https://quantumoptics.at/en/publications/journal-articles.html
https://iopscience.iop.org/article/10.1088/1367-2630/15/12/123012/meta
The native gate set consists of the local gates: X,Y, and XX entangling gates
"""
import json
from typing import Union, Tuple, List, Sequence, cast
import numpy as np
from cirq import ops, devices, study
from cirq import Circuit, LineQubit, IonDevice, Duration
from cirq import DensityMatrixSimulator
# String labels used by the AQT interface mapped to the corresponding cirq
# gates: single-qubit X/Y rotations and the XX entangling gate ('MS').
gate_dict = {'X': ops.X, 'Y': ops.Y, 'MS': ops.XX}
def get_op_string(op_obj: ops.Operation):
    """Find the string representation for a given gate or operation.

    Args:
        op_obj: a gate (XXPowGate, XPowGate, YPowGate, MeasurementGate) or
            an operation wrapping one of those gates.

    Returns:
        'MS', 'X', 'Y' or 'Meas' respectively.

    Raises:
        ValueError: if neither the object nor its gate matches the native
            gate set.
    """
    # ``op_obj`` may be a bare gate (no ``.gate`` attribute) or an
    # operation carrying one; using getattr avoids the AttributeError the
    # previous ``op_obj.gate`` access raised for bare X/Y/Measurement
    # gates, so unknown inputs now reach the ValueError below instead.
    inner_gate = getattr(op_obj, 'gate', None)
    for gate_type, label in ((ops.XXPowGate, 'MS'),
                             (ops.XPowGate, 'X'),
                             (ops.YPowGate, 'Y'),
                             (ops.MeasurementGate, 'Meas')):
        if isinstance(op_obj, gate_type) or isinstance(inner_gate, gate_type):
            return label
    raise ValueError('Got unknown gate:', op_obj)
class AQTNoiseModel(devices.NoiseModel):
    """A noise model for the AQT ion trap: per-gate depolarizing noise plus
    crosstalk onto neighbouring qubits, parameterized by
    get_default_noise_dict()."""
    def __init__(self):
        # Maps gate labels ('X', 'Y', 'MS') to noise operations, plus the
        # scalar 'crosstalk' strength.
        self.noise_op_dict = get_default_noise_dict()
    def noisy_moment(self, moment: ops.Moment,
                     system_qubits: Sequence[ops.Qid]):
        """Return the moment's operations followed by the noise and
        crosstalk operations they induce."""
        noise_list = []
        for op in moment.operations:
            op_str = get_op_string(op)
            try:
                noise_op = self.noise_op_dict[op_str]
            except KeyError:
                # NOTE(review): `break` aborts noise insertion for ALL
                # remaining ops of this moment as soon as one op (e.g. a
                # measurement, which has no noise entry) is missing from
                # noise_op_dict — if per-op skipping was intended this
                # should be `continue`; confirm.
                break
            # Apply the gate's depolarizing noise to each qubit it acts on.
            for qubit in op.qubits:
                noise_list.append(noise_op.on(qubit))
            noise_list += self.get_crosstalk_operation(op, system_qubits)
        return list(moment) + noise_list
    def get_crosstalk_operation(self, operation: ops.Operation,
                                system_qubits: Sequence[ops.Qid]):
        """Build the crosstalk operations induced on the linear-chain
        neighbours of the qubits that `operation` acts on.

        Args:
            operation: Ideal operation
            system_qubits: Tuple of line qubits

        Returns:
            List of scaled-down copies of the operation's gate applied to
            neighbouring qubits (currently single-qubit gates only).
        """
        cast(Tuple[LineQubit], system_qubits)
        num_qubits = len(system_qubits)
        # xtlk_arr[i] holds the crosstalk strength leaked onto qubit i.
        xtlk_arr = np.zeros(num_qubits)
        for qubit in operation.qubits:
            idx = system_qubits.index(qubit)
            neighbors = [idx - 1, idx + 1]
            for neigh_idx in neighbors:
                if neigh_idx >= 0 and neigh_idx < num_qubits:
                    xtlk_arr[neigh_idx] = self.noise_op_dict['crosstalk']
        xtlk_op_list = []
        op_str = get_op_string(operation)
        if len(operation.qubits) == 1:
            # Neighbours receive the same gate with its exponent scaled by
            # the crosstalk strength.
            for idx in xtlk_arr.nonzero()[0]:
                exponent = operation.gate.exponent #type:ignore
                exponent = exponent * xtlk_arr[idx]
                xtlk_op = gate_dict[op_str].on(system_qubits[idx])**exponent
                xtlk_op_list.append(xtlk_op)
        #TODO: Add xtalk for 2 qubit operations
        return xtlk_op_list
class AQTSimulator:
    """A simulator for the AQT device."""
    def __init__(self,
                 num_qubits: int,
                 circuit: Union[Circuit, None] = None,
                 simulate_ideal: bool = False,
                 noise_dict: Union[dict, None] = None):
        """Initializes the AQT simulator.

        Args:
            num_qubits: Number of qubits.
            circuit: Optional circuit to be simulated; its last moment
                needs to be a measurement over all qubits with key 'm'.
                Defaults to a fresh empty Circuit.  (The previous
                ``circuit: Circuit = Circuit()`` default was a mutable
                default argument shared by every instance created without
                an explicit circuit.)
            simulate_ideal: If True, an ideal circuit will be simulated.
            noise_dict: Optional noise-parameter table; defaults to
                get_default_noise_dict().
        """
        # Create a fresh Circuit per instance instead of sharing a default.
        self.circuit = Circuit() if circuit is None else circuit
        self.num_qubits = num_qubits
        self.qubit_list = LineQubit.range(num_qubits)
        if noise_dict is None:
            noise_dict = get_default_noise_dict()
        self.noise_dict = noise_dict
        self.simulate_ideal = simulate_ideal
    def generate_circuit_from_list(self, json_string: str):
        """Generates a list of cirq operations from a json string.

        Each entry of the decoded list is [gate_label, angle, qubit_indices];
        a measurement over all qubits (key 'm') is appended at the end.

        Args:
            json_string: json that specifies the sequence
        """
        self.circuit = Circuit()
        # TODO add ion device here, is this still required?
        json_obj = json.loads(json_string)
        for gate_list in json_obj:
            gate = gate_list[0]
            angle = gate_list[1]
            qubits = [self.qubit_list[i] for i in gate_list[2]]
            self.circuit.append(gate_dict[gate].on(*qubits)**angle)
        # TODO: Better solution for measurement at the end
        self.circuit.append(
            ops.measure(*[qubit for qubit in self.qubit_list], key='m'))
    def simulate_samples(self, repetitions: int) -> study.TrialResult:
        """Samples the circuit.

        Args:
            repetitions: Number of times the circuit is simulated

        Returns:
            TrialResult from Cirq.Simulator

        Raises:
            RuntimeError: if no circuit has been set (still empty).
        """
        # Noise is skipped entirely when simulating the ideal device.
        if self.simulate_ideal:
            noise_model = devices.NO_NOISE
        else:
            noise_model = AQTNoiseModel()
        if self.circuit == Circuit():
            raise RuntimeError('simulate ideal called without a valid circuit')
        sim = DensityMatrixSimulator(noise=noise_model)
        result = sim.run(self.circuit, repetitions=repetitions)
        return result
def get_aqt_device(num_qubits: int) -> Tuple[IonDevice, List[LineQubit]]:
    """Build an AQT ion device with its line qubits.

    Args:
        num_qubits: number of qubits

    Returns:
        (IonDevice configured with the AQT gate durations, list of qubits)
    """
    qubits = LineQubit.range(num_qubits)
    # Durations are specified in microseconds (1000 ns).
    microsecond = 1000 * Duration(nanos=1)
    device = IonDevice(measurement_duration=100 * microsecond,
                       twoq_gates_duration=200 * microsecond,
                       oneq_gates_duration=10 * microsecond,
                       qubits=qubits)
    return device, qubits
def get_default_noise_dict():
    """Return the current noise parameters: a depolarizing channel per
    native gate label plus the scalar crosstalk strength."""
    return {
        'X': ops.depolarize(1e-3),
        'Y': ops.depolarize(1e-3),
        'MS': ops.depolarize(1e-2),
        'crosstalk': 0.03,
    }
| [
"craiggidney+github+cirqbot@google.com"
] | craiggidney+github+cirqbot@google.com |
f9cede8b8b2b03a877bd06d1b635572544e6a1d2 | 133ab0e162c38f2c174f6065c251f7ab6e44eac7 | /test/api_poke.py | 38acffc68860d35bf19ade1f47131b7c155093b6 | [] | no_license | weijihao/wlnupdates | b3d1856bb65e97467e6807f3ab8abb236104ec2d | 0825d354ade8cfbe40117662ee46704573453e16 | refs/heads/master | 2020-04-13T10:14:33.359175 | 2018-12-07T11:02:32 | 2018-12-07T11:02:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | py |
import logSetup
import json
import webFunctions
# if __name__ == "__main__":
# logSetup.initLogging()
# Every read-mode accepted by the API endpoint; test() below POSTs each of
# these once (with a fixed id) as a smoke test.
MODES = [
    'get',
    'get-artists',
    'get-authors',
    'get-genres',
    'get-groups',
    'get-publishers',
    'get-tags',
    'get-oel-releases',
    'get-releases',
    'get-translated-releases',
    'get-oel-series',
    'get-series',
    'get-translated-series',
    'get-artist-id',
    'get-author-id',
    'get-tag-id',
    'get-genre-id',
    'get-publisher-id',
    'get-group-id',
    'get-artist-data',
    'get-author-data',
    'get-tag-data',
    'get-genre-data',
    'get-publisher-data',
    'get-group-data',
    'get-series-id',
    'get-series-data',
    'get-feeds',
    'get-watches',
    'enumerate-tags',
    'enumerate-genres',
    'search-title',
    'search-advanced',
]
def test():
    """Smoke-test the live wlnupdates API: poke every mode, then a few searches.

    Prints each request dict followed by the server's response.  The MODES
    loop decodes the JSON body; the search calls print the raw page.
    """
    wg = webFunctions.WebGetRobust()
    endpoint = "http://127.0.0.1:5000/api"
    endpoint = "https://www.wlnupdates.com/api"

    def call(post, decode=False):
        # Shared request/print helper so each probe stays one line.
        print("Request: ", post)
        pg = wg.getpage(endpoint, postJson=post)
        print(json.loads(pg) if decode else pg)

    # Exercise every mode with a fixed id.
    for mode in MODES:
        call({'mode': mode, 'id': 3}, decode=True)

    # Empty title search.
    call({'mode': 'search-title', 'title': ""})

    # Basic advanced search by title text.
    call({'mode': 'search-advanced', 'title-search-text': "a a"})

    # Advanced search with each optional include-results flavour.
    for include in ['covers', 'tags', 'genres', 'description']:
        call({
            'mode': 'search-advanced',
            'sort-mode': "update",
            'title-search-text': "Fire Girl",
            'chapter-limits': [40, 0],
            'include-results': [include],
        })
# Run the live-API smoke test when executed directly.
if __name__ == "__main__":
    test()
| [
"something@fake-url.com"
] | something@fake-url.com |
22cc013142784a19464aa7c938ac1b90819f859e | 11aac6edab131293027add959b697127bf3042a4 | /reachableNodes.py | 464b86cccd6e7968fcad2660c0ed59b911e18dc5 | [] | no_license | jdanray/leetcode | a76b3436002b31865967b757b73c85992636383b | fd736af3e79899b86dac89d4d925d5bd985944ad | refs/heads/master | 2023-08-15T01:20:05.110565 | 2023-08-14T00:25:58 | 2023-08-14T00:25:58 | 148,686,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # https://leetcode.com/problems/reachable-nodes-with-restrictions/
class Solution(object):
    def reachableNodes(self, n, edges, restricted):
        """Count nodes reachable from node 0 without entering restricted nodes.

        Iterative DFS over an adjacency-set graph; restricted nodes are
        never pushed, so node 0 itself is always counted.
        """
        blocked = set(restricted)
        adjacency = collections.defaultdict(set)
        for (a, b) in edges:
            adjacency[a].add(b)
            adjacency[b].add(a)
        visited = {0}
        frontier = [0]
        while frontier:
            current = frontier.pop()
            for neighbor in adjacency[current]:
                if neighbor not in blocked and neighbor not in visited:
                    visited.add(neighbor)
                    frontier.append(neighbor)
        return len(visited)
| [
"jdanray@users.noreply.github.com"
] | jdanray@users.noreply.github.com |
33810974339334cf8f9d705b437ca520ec839c0e | b9c33f67fa66839ee18930e2679ac8f3a1b450fe | /build/fmauch_universal_robot/ur10_e_moveit_config/catkin_generated/pkg.installspace.context.pc.py | 0b2eccbe76ad4ee047002faf505f0f838ff83a18 | [] | no_license | Haoran-Zhao/Ultrasound_and_UR3 | e397e66207789c50b8fe7ca7c7be9ac9dfa6e2da | bb0e4b19216a4b21a1af4b6524f4ed98fee8d83c | refs/heads/master | 2023-01-07T13:46:56.723360 | 2020-11-11T01:14:18 | 2020-11-11T01:14:18 | 274,579,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated by catkin from pkg.context.pc.in; the odd `"" != ""`
# expressions are template-substitution residue for empty lists.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur10_e_moveit_config"
PROJECT_SPACE_DIR = "/home/haoran/UR_ws/install"
PROJECT_VERSION = "1.2.5"
| [
"zhaohaorandl@gmail.com"
] | zhaohaorandl@gmail.com |
d1eeeb7f9fe4e51847e4bda467776c66ae848bf0 | 709bd5f2ecc69a340da85f6aed67af4d0603177e | /saleor/order/migrations/0063_auto_20180926_0446.py | 49d4236c67cd412afeca9cbf220c84c641bc8465 | [
"BSD-3-Clause"
] | permissive | Kenstogram/opensale | 41c869ee004d195bd191a1a28bf582cc6fbb3c00 | 5102f461fa90f2eeb13b9a0a94ef9cb86bd3a3ba | refs/heads/master | 2022-12-15T02:48:48.810025 | 2020-03-10T02:55:10 | 2020-03-10T02:55:10 | 163,656,395 | 8 | 0 | BSD-3-Clause | 2022-12-08T01:31:09 | 2018-12-31T09:30:41 | Python | UTF-8 | Python | false | false | 1,004 | py | # Generated by Django 2.0.8 on 2018-09-26 09:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: widens the allowed choices on
    # OrderEvent.type. Do not hand-edit logic in generated migrations.

    dependencies = [
        ('order', '0062_auto_20180921_0949'),
    ]

    operations = [
        migrations.AlterField(
            model_name='orderevent',
            name='type',
            field=models.CharField(choices=[('PLACED', 'placed'), ('PLACED_FROM_DRAFT', 'draft_placed'), ('OVERSOLD_ITEMS', 'oversold_items'), ('ORDER_MARKED_AS_PAID', 'marked_as_paid'), ('CANCELED', 'canceled'), ('ORDER_FULLY_PAID', 'order_paid'), ('UPDATED', 'updated'), ('EMAIL_SENT', 'email_sent'), ('PAYMENT_CAPTURED', 'captured'), ('PAYMENT_REFUNDED', 'refunded'), ('PAYMENT_RELEASED', 'released'), ('FULFILLMENT_CANCELED', 'fulfillment_canceled'), ('FULFILLMENT_RESTOCKED_ITEMS', 'restocked_items'), ('FULFILLMENT_FULFILLED_ITEMS', 'fulfilled_items'), ('TRACKING_UPDATED', 'tracking_updated'), ('NOTE_ADDED', 'note_added'), ('OTHER', 'other')], max_length=255),
        ),
    ]
| [
"Kenstogram@gmail.com"
] | Kenstogram@gmail.com |
09a814455de03668c5222bf0df188a91ea64e109 | 77d445489bf42d641eb3c3aeab18d0aac7912a37 | /AppDB/hypertable/test_hypertable2.py | 6ad758bac5a6efbc0056d9fba12399887ab40d1c | [
"BSD-3-Clause"
] | permissive | mohitsethi/appscale | 08574494e71c925a11ac6359af0eb5c0af3318a7 | 8d8cf664c36eae6e474923b4588e680b53fca564 | refs/heads/master | 2021-01-17T03:15:47.555515 | 2012-11-14T22:30:03 | 2012-11-14T22:30:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,081 | py | import py_hypertable
# Python 2 smoke test for the Hypertable DatastoreProxy: put/get a row,
# check the schema, delete rows, then drop the table (twice).
py_hypertable = py_hypertable.DatastoreProxy()
columns = ["a","b","c"]
data = ["1","2","3"]
table_name = "hello"
key = "1"
print "key= " + key
print "columns= " + str(columns)
print "data= " + str(data)
print "table= " + table_name
# Round-trip: put one entity, read it back, compare payload.
print "PUT"
print py_hypertable.put_entity(table_name, key, columns, data)
print "GET"
ret = py_hypertable.get_entity(table_name, key, columns)
print "doing a put then get"
print ret
# ret[0] appears to be a status/header element; payload starts at index 1.
if ret[1:] != data:
    print "ERROR doing a put then get. Data does not match"
    print "returned: " + str(ret)
    print "expected: " + str(data)
    exit(1)
else:
    print "Success"
# Schema check: returned column list should match what we created.
ret = py_hypertable.get_schema("hello")
print ret
print "checking schema:"
print ret
if ret[1:] != columns:
    print "ERROR in recieved schema"
    print "returned: " + str(ret)
    print "expected: " + str(columns)
#ret = py_hypertable.__table_exist(table_name)
#print "Does table we just created exist?"
#print ret
# Delete the row, then confirm the key no longer resolves.
ret = py_hypertable.delete_row(table_name, key)
print "Deleting the key %s"%key
print ret
ret = py_hypertable.get_entity(table_name, key, columns)
print "Trying to get deleted key:"
print ret
# Re-insert several rows and scan the whole table after each change.
print "doing a put with key %s"%key
print py_hypertable.put_entity("hello", "1", ["a","b","c"], ["1","2","3"])
print "doing a get table"
print py_hypertable.get_table("hello", ["a","b","c"])
py_hypertable.put_entity("hello", "2", ["a","b","c"], ["4","5","6"])
print "doing get table:"
print py_hypertable.get_table("hello", ["a","b","c"])
py_hypertable.put_entity("hello", "3", ["a","b","c"], ["1","2","3"])
py_hypertable.get_table("hello", ["a","b","c"])
# Re-putting an existing key should replace, not duplicate, the row.
print "TRYING TO REPLACE KEY 3"
py_hypertable.put_entity("hello", "3", ["a","b","c"], ["1","2","3"])
py_hypertable.get_table("hello", ["a","b","c"])
py_hypertable.get_row_count("hello")
# Clean up rows, then drop the table; second drop exercises the
# already-deleted path.
ret = py_hypertable.delete_row("hello", "1")
ret = py_hypertable.delete_row("hello", "2")
ret = py_hypertable.delete_row("hello", "3")
py_hypertable.get_table("hello", ["a","b","c"])
print "Deleting table:"
print py_hypertable.delete_table("hello")
print "deleting twice:"
print py_hypertable.delete_table("hello")
| [
"shatterednirvana@gmail.com"
] | shatterednirvana@gmail.com |
ffdf9a412d134a2974d096e09f13e7612d583a29 | 7437ad1203ff272a482e4a7c7266afdbc7a0e619 | /lra/models/gpu_16g/linear_transformer_exp/cifar10/r1/config.py | 76f36d46c3d7d73841b5969d906a21efecedfec3 | [] | no_license | maximzubkov/spe | 4ccc59d538a2cb4e5f9b0118ef79933eed0b8d95 | d877feb0f6b935152e5431ce374606ba72c08d65 | refs/heads/main | 2023-08-23T02:08:14.253693 | 2021-10-05T17:25:36 | 2021-10-05T17:25:36 | 385,636,912 | 0 | 0 | null | 2021-10-05T17:25:37 | 2021-07-13T14:42:19 | Jupyter Notebook | UTF-8 | Python | false | false | 2,009 | py | # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
from fast_self_attention import fast_self_attention as favor
import jax
from lra_benchmarks.image.configs.cifar10 import base_cifar10_config
from lra_benchmarks.image.configs.cifar10.base_cifar10_config import TRAIN_EXAMPLES, VALID_EXAMPLES
NUM_EPOCHS = 200
def get_config():
    """Get the hyperparameter configuration."""
    config = base_cifar10_config.get_config()
    config.random_seed = 0
    config.model_type = "transformer"

    # Optimization and learning-rate schedule.
    config.learning_rate = .00025
    config.batch_size = 96
    steps_per_epoch = TRAIN_EXAMPLES // config.batch_size
    config.eval_frequency = steps_per_epoch
    config.num_train_steps = steps_per_epoch * NUM_EPOCHS
    config.num_eval_steps = VALID_EXAMPLES // config.batch_size
    config.factors = 'constant * linear_warmup * cosine_decay'
    config.warmup = steps_per_epoch * 1  # one epoch of warmup

    # Model architecture.
    config.model.dropout_rate = 0.3
    config.model.attention_dropout_rate = 0.2
    config.model.learn_pos_emb = True
    config.model.num_layers = 1
    config.model.emb_dim = 128
    config.model.qkv_dim = 64
    config.model.mlp_dim = 128
    config.model.num_heads = 8
    config.model.classifier_pool = "CLS"

    # Fast generalized (FAVOR-style) attention with an exp kernel.
    config.attention_fn = favor.make_fast_generalized_attention(
        qkv_dim=config.model.qkv_dim // config.model.num_heads,
        features_type='deterministic',
        kernel_fn=jax.lax.exp,
        lax_scan_unroll=16)
    return config
def get_hyper(hyper):
    """Define the hyperparameter sweep: empty, i.e. a single run."""
    sweeps = []
    return hyper.product(sweeps)
| [
"zubkov.md@phystech.edu"
] | zubkov.md@phystech.edu |
2ce9511a97d7656f5078b52357e45401ff6d5221 | e0c8662a56d89730043146ddc340e9e0b9f7de72 | /plugin/11e29f78-1596.py | 89ecf78cd0947e922640644c55f527d24cf38eec | [] | no_license | izj007/bugscan_poc | f2ef5903b30b15c230b292a1ff2dc6cea6836940 | 4490f3c36d4033bdef380577333722deed7bc758 | refs/heads/master | 2020-09-22T17:20:50.408078 | 2019-01-18T09:42:47 | 2019-01-18T09:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | #coding:utf-8
from lib.curl import *
# -*- coding: utf-8 -*
#http://www.wooyun.org/bugs/wooyun-2010-085076,http://www.wooyun.org/bugs/wooyun-2010-086828,http://www.wooyun.org/bugs/wooyun-2010-086831,http://www.wooyun.org/bugs/wooyun-2010-086833,http://www.wooyun.org/bugs/wooyun-2010-086834,
def assign(service, arg):
    """Claim the target only for the 'strongsoft' service.

    Returns (True, arg) when this plugin applies, otherwise None
    (implicit fall-through in the original, made explicit here).
    """
    if service != 'strongsoft':
        return None
    return True, arg
def audit(arg):
    """Probe StrongSoft endpoints for SQL injection.

    Each payload forces `db_name(1)` into an INT conversion; a vulnerable
    backend leaks the database name ('master') into a 200 response.
    """
    payloads = [
        "Response/AjaxHandle/AjaxSingleGetReferenceFieldValue.ashx?strFieldValue=1&strSelectFieldCollection=1&tableName=sysobjects&strFieldName=convert(int,db_name(1))",
        "Report/AjaxHandle/StationChoose/StationSearch.ashx?stationName=')+and+1=2++union+all+select+(db_name(1)),NULL--&stationType='KKK'&sqlW",
        "warn/OuterWarnModEdit.aspx?ModID=1+AND+5726=CONVERT(INT,(select+top+1+db_name(1)+from+strongmain.dbo.Web_SystemUser))",
        "Duty/MailList/ContactUpdate.aspx?ReadOnly=&UnitID=1&ContactID=-1+and+1=db_name(1)"]
    for payload in payloads:
        target = arg + payload
        status, _head, body, _, _ = curl.curl2(target)
        if status == 200 and 'master' in body:
            security_hole(target)
# Manual run against two known test hosts (dummy supplies the
# security_hole/curl test doubles).
if __name__ == '__main__':
    from dummy import *
    audit(assign('strongsoft','http://183.129.136.54:3050/')[1])
    audit(assign('strongsoft','http://ldfxb.com/')[1])
"yudekui@wsmtec.com"
] | yudekui@wsmtec.com |
cf12548fefc34361610649871cb85ca3d17f1b01 | b03f83a7ea6437b1f9bbfedfee73deb671be9653 | /sprinter/formula/tests/test_template.py | 16a3914784b2fa107fa4bf0cabe423ca68677276 | [
"MIT"
] | permissive | GreenGremlin/sprinter | 15b874fbc6c20a43d0a8a3bb94c81e6e531ca63a | f129f8de7b2100f554d0b36055b859c4f5dd1f03 | refs/heads/develop | 2021-01-19T07:06:50.334903 | 2016-02-23T21:12:52 | 2016-02-23T21:12:52 | 49,906,263 | 0 | 1 | null | 2016-01-18T21:23:53 | 2016-01-18T21:23:53 | null | UTF-8 | Python | false | false | 2,616 | py | from __future__ import unicode_literals
import httpretty
import os
import shutil
import tempfile
from sprinter.testtools import FormulaTest
# Sprinter config before the update: a single templated feature.
# %(temp_dir)s is filled in per-test via `%` formatting in setup().
source_config = """
[update_example]
formula = sprinter.formula.template
source = %(temp_dir)s/in.txt
target = %(temp_dir)s/out.txt
"""

# Target config: local-file, http, and on_update variants of the formula.
target_config = """
[simple_example]
formula = sprinter.formula.template
source = %(temp_dir)s/in.txt
target = %(temp_dir)s/out.txt
[http_example]
formula = sprinter.formula.template
source = http://testme.com/test.txt
target = %(temp_dir)s/out.txt
[update_example]
formula = sprinter.formula.template
source = %(temp_dir)s/in.txt
target = %(temp_dir)s/out.txt
on_update = true
"""
class TestUnpackFormula(FormulaTest):
    """ Tests for the unpack formula """

    def setup(self):
        self.temp_dir = tempfile.mkdtemp()
        substitutions = {'temp_dir': self.temp_dir}
        super(TestUnpackFormula, self).setup(
            source_config=(source_config % substitutions),
            target_config=(target_config % substitutions))

    def teardown(self):
        shutil.rmtree(self.temp_dir)

    def _write_input(self, content):
        # Create the template input file that the feature will read.
        with open(os.path.join(self.temp_dir, 'in.txt'), 'w+') as handle:
            handle.write(content)

    def _out_path(self):
        return os.path.join(self.temp_dir, 'out.txt')

    def test_simple_example(self):
        """ The template formula should grab a template and save it """
        self._write_input(SIMPLE_TEMPLATE)
        self.environment.run_feature("simple_example", 'sync')
        target = self._out_path()
        assert os.path.exists(target)
        assert open(target).read() == SIMPLE_TEMPLATE

    @httpretty.activate
    def test_http_example(self):
        """ The template formula should grab a template via http and save it """
        TEST_URI = "http://testme.com/test.txt"
        httpretty.register_uri(httpretty.GET, TEST_URI,
                               body=SIMPLE_TEMPLATE)
        self.environment.run_feature("http_example", 'sync')
        target = self._out_path()
        assert os.path.exists(target)
        assert open(target).read() == SIMPLE_TEMPLATE

    def test_update_example(self):
        """ The template formula should update a template when on_update is set """
        self._write_input(UPDATE_TEMPLATE)
        self.environment.run_feature("update_example", 'sync')
        target = self._out_path()
        assert os.path.exists(target)
        assert open(target).read() == UPDATE_TEMPLATE
assert open(out_file).read() == UPDATE_TEMPLATE
# Fixture payloads written to in.txt and compared against out.txt above.
SIMPLE_TEMPLATE = """
This is a simple template.
"""

UPDATE_TEMPLATE = """
This is an updated template.
"""
| [
"tsutsumi.yusuke@gmail.com"
] | tsutsumi.yusuke@gmail.com |
1a6eaef4367b336d7870b795dc593757ee81b899 | 6a41dd36ddd3e501b62ff253b40bf9bbbaa722c2 | /간단한 파이썬 프로그램/polygon.py | 736c5d5b10a49bd637225c4716d1414d7add5bb4 | [] | no_license | skysamer/first_python | 9ba79b194d838e0cdeab6f2e7a4207d71c73ed63 | 638622f51434eda65ef3300e3ce5db3a2a79db2a | refs/heads/master | 2023-02-03T08:21:23.370285 | 2020-12-27T13:39:20 | 2020-12-27T13:39:20 | 307,953,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import turtle as t
def polygon(n):
for x in range(n):
t.fd(50)
t.lt(360/n)
def polygon2(n,a):
for x in range(n):
t.fd(a)
t.lt(360/n)
polygon(3)
polygon(5)
# 그림을 그리지 않고 거북이를 100만큼 이동시킵니다.
t.up()
t.fd(100)
t.down()
polygon2(3, 75)
polygon2(5, 100)
| [
"skyslayer123@naver.com"
] | skyslayer123@naver.com |
11253aa7cbe8c15ff067e431554014aea164fe0d | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/framework/data/ConvertFileSystem.pyi | 3140db6203ea76118da9b94f4b3859945cbe8573 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,053 | pyi | from typing import List
import ghidra
import ghidra.framework.data.ConvertFileSystem
import java.io
import java.lang
# Auto-generated Jython type stub for Ghidra's ConvertFileSystem launcher.
# `unicode`/`long` are Python 2 names; bodies are intentionally `...`.
class ConvertFileSystem(object, ghidra.GhidraLaunchable):

    class MessageListener(object):
        # Callback interface: println() receives progress messages; the
        # remaining methods mirror java.lang.Object.

        def equals(self, __a0: object) -> bool: ...

        def getClass(self) -> java.lang.Class: ...

        def hashCode(self) -> int: ...

        def notify(self) -> None: ...

        def notifyAll(self) -> None: ...

        def println(self, __a0: unicode) -> None: ...

        def toString(self) -> unicode: ...

        @overload
        def wait(self) -> None: ...

        @overload
        def wait(self, __a0: long) -> None: ...

        @overload
        def wait(self, __a0: long, __a1: int) -> None: ...

    class ConvertFileSystemException(java.io.IOException):
        # Checked exception raised on conversion failure; mirrors the
        # standard java.io.IOException surface.

        @overload
        def __init__(self): ...

        @overload
        def __init__(self, __a0: unicode): ...

        @overload
        def __init__(self, __a0: unicode, __a1: java.lang.Throwable): ...

        def addSuppressed(self, __a0: java.lang.Throwable) -> None: ...

        def equals(self, __a0: object) -> bool: ...

        def fillInStackTrace(self) -> java.lang.Throwable: ...

        def getCause(self) -> java.lang.Throwable: ...

        def getClass(self) -> java.lang.Class: ...

        def getLocalizedMessage(self) -> unicode: ...

        def getMessage(self) -> unicode: ...

        def getStackTrace(self) -> List[java.lang.StackTraceElement]: ...

        def getSuppressed(self) -> List[java.lang.Throwable]: ...

        def hashCode(self) -> int: ...

        def initCause(self, __a0: java.lang.Throwable) -> java.lang.Throwable: ...

        def notify(self) -> None: ...

        def notifyAll(self) -> None: ...

        @overload
        def printStackTrace(self) -> None: ...

        @overload
        def printStackTrace(self, __a0: java.io.PrintStream) -> None: ...

        @overload
        def printStackTrace(self, __a0: java.io.PrintWriter) -> None: ...

        def setStackTrace(self, __a0: List[java.lang.StackTraceElement]) -> None: ...

        def toString(self) -> unicode: ...

        @overload
        def wait(self) -> None: ...

        @overload
        def wait(self, __a0: long) -> None: ...

        @overload
        def wait(self, __a0: long, __a1: int) -> None: ...

    def __init__(self): ...

    @staticmethod
    def convertProject(dir: java.io.File, msgListener: ghidra.framework.data.ConvertFileSystem.MessageListener) -> None: ...

    def equals(self, __a0: object) -> bool: ...

    def getClass(self) -> java.lang.Class: ...

    def hashCode(self) -> int: ...

    def launch(self, layout: ghidra.GhidraApplicationLayout, args: List[unicode]) -> None: ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    def toString(self) -> unicode: ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
4930c0bed9781c2e2863981e362b444cb6645535 | 54d4b509e511d1d281e791688221ecb816f2fe88 | /dicionarios/dicionario.py | b87777fa430aa9467ce9535a65fa85a730e6a014 | [] | no_license | treinaweb/treinaweb-python-collections | 285c1ee1afe54f57b7ce08f0a786a080f82037e4 | 6dc0c8cf54f00edf69c7224342bebfcf81cda1d4 | refs/heads/master | 2020-03-30T09:40:16.423932 | 2018-10-11T23:00:01 | 2018-10-11T23:00:01 | 151,086,808 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | meu_dicionario = {1 : 'Fabio', 2 : 'Maria', 3 : 'João', 4 : 'José'}
print(type(meu_dicionario))
# dict(...) applied to a dict literal yields an equal copy, same type.
meu_dicionario_2 = dict({1 : 'Fabio', 2 : 'Maria', 3 : 'João', 4 : 'José'})
print(type(meu_dicionario_2))
# Look up a single value by key.
print(meu_dicionario[4])
# Iterate (key, value) pairs and print them (Portuguese output strings).
for chave, valor in meu_dicionario.items():
    print(f" A chave é {chave} e o valor {valor}")
"fagnerpinheirosantos@gmail.com"
] | fagnerpinheirosantos@gmail.com |
ac2557598d14a8b2eb18005f3dfb47c016fb27c4 | d6568d4c3cf29ec5da430db2fdedb9023487134e | /configs.py | a241905a455d46bd9f4a265fd54dda63ac8f9036 | [] | no_license | j-min/Adversarial_Video_Summary | 7aba4135f57219af31b080696db67f8ceb6b527e | fb6d5bf70479373f96f2d944c672af8286c9bc89 | refs/heads/master | 2023-04-07T07:21:38.630603 | 2023-03-28T18:20:30 | 2023-03-28T18:20:30 | 110,589,567 | 253 | 67 | null | null | null | null | UTF-8 | Python | false | false | 3,191 | py | # -*- coding: utf-8 -*-
import argparse
from pathlib import Path
import pprint
# Hard-coded cluster paths used by Config.set_dataset_dir below.
project_dir = Path(__file__).resolve().parent
dataset_dir = Path('/data1/jysung710/tmp_sum/360video/').resolve()
video_list = ['360airballoon', '360parade', '360rowing', '360scuba', '360wedding']
save_dir = Path('/data1/jmcho/SUM_GAN/')
score_dir = Path('/data1/common_datasets/tmp_sum/360video/results/SUM-GAN/')
def str2bool(v):
    """Parse a boolean-ish CLI string (case-insensitive).

    Accepts yes/no, true/false, t/f, y/n, 1/0; anything else raises
    argparse.ArgumentTypeError so argparse reports a clean usage error.
    """
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
class Config(object):
    """Run configuration: keyword arguments become instance attributes."""

    def __init__(self, **kwargs):
        """Configuration Class: set kwargs as class attributes with setattr"""
        for key, value in kwargs.items():
            setattr(self, key, value)
        # Derive all dataset/checkpoint paths from the chosen video type.
        self.set_dataset_dir(self.video_type)

    def set_dataset_dir(self, video_type='360airballon'):
        # NOTE(review): the default looks like a typo for '360airballoon'
        # (cf. video_list); __init__ always passes video_type explicitly.
        if self.preprocessed:
            self.video_root_dir = dataset_dir.joinpath('resnet101_feature', video_type, self.mode)
        else:
            self.video_root_dir = dataset_dir.joinpath('video_subshot', video_type, 'test')
        self.save_dir = save_dir.joinpath(video_type)
        self.log_dir = self.save_dir
        self.ckpt_path = self.save_dir.joinpath(f'epoch-{self.epoch}.pkl')
        self.score_dir = score_dir

    def __repr__(self):
        """Pretty-print configurations in alphabetical order"""
        return 'Configurations\n' + pprint.pformat(self.__dict__)
def get_config(parse=True, **optional_kwargs):
    """Parse CLI flags (plus keyword overrides) into a Config instance.

    With parse=True unknown flags are an error (parse_args); otherwise
    they are ignored (parse_known_args). optional_kwargs win over CLI.
    """
    parser = argparse.ArgumentParser()

    # Mode
    parser.add_argument('--mode', type=str, default='train')
    parser.add_argument('--verbose', type=str2bool, default='true')
    parser.add_argument('--preprocessed', type=str2bool, default='True')
    parser.add_argument('--video_type', type=str, default='360airballoon')

    # Model
    parser.add_argument('--input_size', type=int, default=2048)
    parser.add_argument('--hidden_size', type=int, default=500)
    parser.add_argument('--num_layers', type=int, default=2)
    parser.add_argument('--summary_rate', type=float, default=0.3)

    # Train
    parser.add_argument('--n_epochs', type=int, default=50)
    parser.add_argument('--clip', type=float, default=5.0)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--discriminator_lr', type=float, default=1e-5)
    parser.add_argument('--discriminator_slow_start', type=int, default=15)

    # load epoch
    parser.add_argument('--epoch', type=int, default=2)

    if parse:
        namespace = parser.parse_args()
    else:
        namespace = parser.parse_known_args()[0]

    args = vars(namespace)        # Namespace -> plain dict
    args.update(optional_kwargs)  # explicit overrides take precedence
    return Config(**args)
# Manual inspection entry point: build a config then drop into ipdb.
if __name__ == '__main__':
    config = get_config()
    import ipdb
    ipdb.set_trace()
| [
"heythisischo@gmail.com"
] | heythisischo@gmail.com |
67bdc20397684f2e7335ca322c2b75ebf829f1a4 | 07622a0fb38e843ab0eef4f69bb8fb25d107c06d | /pretrained_mol_sim/Theano-master/theano/tensor/tests/test_merge.py | a746c3f34dda4f0333b7adbf4af9c083a52cbf0a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | andreeadeac22/graph_coattention | fa59d77252625e4bee1cb9670e4a0fd0fec98135 | 23781fedaa942ca5614054f965cb7b6543e533fa | refs/heads/master | 2023-08-08T01:51:51.368457 | 2020-02-19T04:56:59 | 2020-02-19T04:56:59 | 207,414,336 | 15 | 4 | MIT | 2023-07-22T15:47:39 | 2019-09-09T22:13:34 | Python | UTF-8 | Python | false | false | 2,239 | py | from __future__ import absolute_import, print_function, division
import numpy
from theano.gof.type import Type
from theano.gof.graph import Variable, Apply, Constant
from theano.gof.op import Op
from theano.gof.opt import *
from theano.gof.fg import FunctionGraph as Env
from theano.gof.toolbox import *
import theano.tensor.basic as T
def as_variable(x):
    """Return x unchanged if it is a theano Variable, else raise TypeError."""
    if isinstance(x, Variable):
        return x
    raise TypeError("not a Variable", x)
class MyType(Type):
    """Minimal theano Type for tests: no validation, all instances equal."""

    def filter(self, data):
        # Accept anything unchanged (no conversion or checking).
        return data

    def __eq__(self, other):
        # Every MyType instance is interchangeable.
        return isinstance(other, MyType)
class MyOp(Op):
    """Test Op: named, optional destroy map, and identity-or-x equality."""

    def __init__(self, name, dmap=None, x=None):
        # Avoid the shared-mutable-default pitfall for the destroy map.
        self.name = name
        self.destroy_map = {} if dmap is None else dmap
        self.x = x

    def make_node(self, *inputs):
        """Validate that all inputs are MyType Variables and build an Apply."""
        inputs = [as_variable(inp) for inp in inputs]
        for inp in inputs:
            if not isinstance(inp.type, MyType):
                raise Exception("Error 1")
        outputs = [MyType()()]
        return Apply(self, inputs, outputs)

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name

    def __eq__(self, other):
        # Identity always matches; otherwise compare by x when it is set.
        if self is other:
            return True
        return (isinstance(other, MyOp) and self.x is not None
                and self.x == other.x)

    def __hash__(self):
        # Hash by x when set (consistent with __eq__), else by identity.
        return id(self) if self.x is None else self.x
op1 = MyOp('Op1')
def test_merge_with_weird_eq():
    """numpy arrays don't compare equal like other python objects"""
    # After MergeOptimizer, the two equal constants should be merged: one
    # apply node whose two inputs are the *same* Variable object.
    # SCALAR CASE
    x = T.constant(numpy.asarray(1), name='x')
    y = T.constant(numpy.asarray(1), name='y')
    g = Env([x, y], [x+y])
    MergeOptimizer().optimize(g)
    assert len(g.apply_nodes) == 1
    node = list(g.apply_nodes)[0]
    assert len(node.inputs) == 2
    assert node.inputs[0] is node.inputs[1]
    # NONSCALAR CASE
    # This was created to test TensorConstantSignature
    x = T.constant(numpy.ones(5), name='x')
    y = T.constant(numpy.ones(5), name='y')
    g = Env([x, y], [x+y])
    MergeOptimizer().optimize(g)
    assert len(g.apply_nodes) == 1
    node = list(g.apply_nodes)[0]
    assert len(node.inputs) == 2
    assert node.inputs[0] is node.inputs[1]
| [
"andreeadeac22@gmail.com"
] | andreeadeac22@gmail.com |
3f799ca0a7b0e313884b05a1dd27c3cc13f529f3 | 3d4094bc2b372f93bb61bd6422c061552b06a775 | /migen/pytholite/fsm.py | 7bc6c4c227f5f62e7c1567098201b796e5c8c42d | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | vic0/migen | efff647f925687f77acd926e36f1bba6b2e620f7 | d6f7b4cee6a242d1962766f53a09b48871a188fc | refs/heads/master | 2021-01-17T21:36:38.723171 | 2013-06-17T21:36:03 | 2013-06-17T21:36:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | from migen.fhdl import visit as fhdl
from migen.genlib.fsm import FSM
class AbstractNextState:
    """Placeholder FSM action: a jump to target_state, lowered later."""

    def __init__(self, target_state):
        self.target_state = target_state
# entry state is first state returned
class StateAssembler:
    """Accumulates FSM states, chaining dangling exits into each new batch."""

    def __init__(self):
        self.states = []
        self.exit_states = []

    def assemble(self, n_states, n_exit_states):
        """Append n_states; patch pending exit states to jump to n_states[0]."""
        self.states.extend(n_states)
        for pending in self.exit_states:
            pending.insert(0, AbstractNextState(n_states[0]))
        self.exit_states = n_exit_states

    def ret(self):
        # Entry state is the first state returned.
        return self.states, self.exit_states
# like list.index, but using "is" instead of comparison
def _index_is(l, x):
for i, e in enumerate(l):
if e is x:
return i
class _LowerAbstractNextState(fhdl.NodeTransformer):
    """Replaces AbstractNextState placeholders with concrete next_state calls."""

    def __init__(self, fsm, states, stnames):
        self.fsm = fsm
        self.states = states
        self.stnames = stnames

    def visit_unknown(self, node):
        if not isinstance(node, AbstractNextState):
            return node
        # Resolve the placeholder's target by identity, then map it to the
        # FSM attribute of the same position.
        idx = _index_is(self.states, node.target_state)
        concrete = getattr(self.fsm, self.stnames[idx])
        return self.fsm.next_state(concrete)
def implement_fsm(states):
    """Build a concrete FSM from abstract states, lowering next-state jumps."""
    names = ["S" + str(i) for i in range(len(states))]
    fsm = FSM(*names)
    lowerer = _LowerAbstractNextState(fsm, states, names)
    for idx, abstract_state in enumerate(states):
        actions = lowerer.visit(abstract_state)
        fsm.act(getattr(fsm, names[idx]), *actions)
    return fsm
| [
"sebastien@milkymist.org"
] | sebastien@milkymist.org |
924182b2b4e656565bc908ced7ef983b8f21bb9a | c5dea14bc938dfc5d619f06963897d8c52625200 | /msp.py | e701aa6926f759e55e0cf1ff1838c26bc573efbb | [] | no_license | bradbeattie/OfficeSim | 958a10f9c9368b5782b3ff00ce52391c9080bb99 | 5b9c9bf2bc727fe0f096834792864f054e37df46 | refs/heads/master | 2020-04-30T01:37:46.182782 | 2019-03-19T14:50:20 | 2019-03-19T14:50:20 | 176,534,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,883 | py | #!/usr/bin/env python3
from collections import deque, namedtuple
from pathfinding.core import heuristic
from pathfinding.finder.finder import Finder
import heapq
import time
class MinimumSpanningTree(Finder):
    """Spanning-tree expansion over a pathfinding grid, cheapest node first."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Null heuristic: expansion order is driven by node cost alone.
        self.heuristic = heuristic.null

    def tree(self, grid, start):
        """Return the spanning tree from `start` as a list of nodes."""
        return list(self.itertree(grid, start))

    def itertree(self, grid, start):
        """Yield nodes of the spanning tree, cheapest open node first."""
        # Finder.process_node requires an end node, which we don't have. The following
        # value tricks the call to Finder.apply_heuristic. Though maybe we want to generate
        # a limited spanning tree that trends in a certain direction? In which case we'd
        # want a more nuanced solution.
        end = namedtuple("FakeNode", ["x", "y"])(-1, -1)
        self.start_time = time.time() # execution time limitation
        self.runs = 0 # count number of iterations
        start.opened = True
        open_list = [start]
        while len(open_list) > 0:
            self.runs += 1
            self.keep_running()
            # Pick the minimum-cost open node (nsmallest + remove is O(n)).
            node = heapq.nsmallest(1, open_list)[0]
            open_list.remove(node)
            node.closed = True
            yield node
            neighbors = self.find_neighbors(grid, node)
            for neighbor in neighbors:
                if not neighbor.closed:
                    self.process_node(neighbor, node, end, open_list, open_value=True)

    def find_path(self, start, end_test, grid):
        """Grow the tree until end_test(node) is true; return (path, runs).

        The path is rebuilt by walking parent links back to `start`.
        """
        for node in self.itertree(grid, start):
            if end_test(node):
                path = deque()
                step = node
                while step.parent:
                    path.appendleft(step)
                    step = step.parent
                path.appendleft(step)
                return path, self.runs
        else:
            # for/else: the tree was exhausted without satisfying end_test.
            return [], self.runs
| [
"you@example.com"
] | you@example.com |
85391701c6b68a512a4f34ffbf1634c60d5373a6 | 54aef785bcb729509af45347025e2c63bbd95e38 | /dg_fn/typecheck.py | d21fe3ee9557ebdf4cbd311a0650f9245ecfb5db | [
"MIT"
] | permissive | d-gold/dg_fn | c19980b7e0045f8b920f914e4c37015b5a348ab1 | 2586402faaa8a1d1a4b3f16e997ab0059f89cb9c | refs/heads/master | 2021-01-19T14:24:07.381631 | 2014-10-31T14:47:08 | 2014-10-31T14:47:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | import collections
import config
from types import GeneratorType
from types import FunctionType
def is_function(obj):
    """Return True if obj is a plain Python function (types.FunctionType).

    Builtins and other callables are excluded.
    """
    return isinstance(obj, FunctionType)
def is_gen(obj):
    """Return True if obj is a generator object (types.GeneratorType)."""
    return isinstance(obj, GeneratorType)
def is_seq(obj):
    """Return True for sequence-like objects (tuples, lists, etc.).

    Strings and dictionaries are excluded; anything matching the
    project's config.Sequence type always counts.
    """
    # NOTE: collections.Sequence / basestring are Python 2 era names.
    results = isinstance(obj, config.Sequence) or \
        (isinstance(obj, collections.Sequence) and
         not isinstance(obj, basestring))
    return results
| [
"autowitch@autowit.ch"
] | autowitch@autowit.ch |
ae7b2ee2c052e83a5b3034e3bdfa042494112a8d | 3efe2059de4c7efd1f58a385656d19098b7efd63 | /deepiu/tools/classification-inference.py | 005d802fe9ae0eddc99879c01e0b338b5c5e0971 | [] | no_license | yangyaoyunshu/image-caption-ai-challenger2017 | 5d2e82b2f8d70ac6d4eb7a0e70f6b406e551189b | 7f2c556587ea1e5c4583fe3b12b8d40c5a2aa2cc | refs/heads/master | 2021-08-31T22:48:55.886186 | 2017-12-23T07:27:04 | 2017-12-23T07:27:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,413 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file classification-inference.py
# \author chenghuige
# \date 2017-11-10 15:32:56.988358
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys, os
from deepiu.util.classifier import Classifier
import melt
import glob
import tensorflow as tf
# Command-line flags (TensorFlow flags API).
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model_dir', None, '')
flags.DEFINE_string('test_image_dir', None, '')
flags.DEFINE_integer('buffer_size', 512, '')
flags.DEFINE_string('result_file', None, '')
# HACK for nasnet
# Tracks images already written, since main() pads the final batch with
# repeats of the first image.
done_imgs = set()
def predict(predictor, imgs, out):
    """Classify a batch of image paths and append one TSV row per image to `out`."""
    result = predictor.predict(imgs)
    for img, top_class, top_logit, top_prediction, logit, prediction \
        in zip(imgs, result.top_classes,
               result.top_logits, result.top_predictions,
               result.logits, result.predictions):
        # Skip duplicates introduced by the batch-padding hack in main().
        if img not in done_imgs:
            print(os.path.basename(img), ' '.join(map(str, top_class)),
                  ' '.join(map(str, top_logit)), ' '.join(map(str, top_prediction)),
                  ' '.join(map(str, logit)), ' '.join(map(str, prediction)), sep='\t', file=out)
            done_imgs.add(img)
def main(_):
model_dir = FLAGS.model_dir or sys.argv[1]
assert model_dir
model_path = melt.get_model_path(model_dir)
print('model_path:', model_path, file=sys.stderr)
result_file = FLAGS.result_file or model_path + '.inference.txt'
print('result file is:', result_file, file=sys.stderr)
out = open(result_file, 'w')
predictor = Classifier(model_dir)
imgs = []
files = glob.glob(FLAGS.test_image_dir + '/*')
num_files = len(files)
assert num_files, FLAGS.test_image_dir
print('num_files to inference', num_files)
finished = 0
for img_ in files:
imgs.append(img_)
if len(imgs) == FLAGS.buffer_size:
predict(predictor, imgs, out)
finished += len(imgs)
print('finished:%f' % (finished / float(num_files)), file=sys.stderr, end='\r')
imgs = []
if imgs:
# HACK for nasnet
while len(imgs) != FLAGS.buffer_size:
imgs.append(imgs[0])
predict(predictor, imgs, out)
imgs = []
if __name__ == '__main__':
tf.app.run()
| [
"29109317@qq.com"
] | 29109317@qq.com |
b6f7344ecd5ba6ce6be5fa984710b5a6f5c75229 | 621dfccf1ace31bcf48cd1e7cc80b8dbd556b21b | /03_bigdata/02_Standardization_Analysis/2_Excel/10_pandas_column_by_name_all_worksheets.py | 3fa5d8218a1b05fe1ece42552de3adbc5296f016 | [] | no_license | hansangwoo1969/iot_python2019 | 35d90997b442845d1f5fa5a6119d5352a8dbe46f | e1ea46bb4afcc33e1fc28b43e845759d8688098b | refs/heads/master | 2020-06-14T22:19:47.647686 | 2019-09-19T07:40:16 | 2019-09-19T07:40:16 | 195,142,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | import pandas as pd
# input_file = sys.argv[1]
# output_file = sys.argv[2]
input_file = 'sales_2013.xlsx'
output_file = 'output_files/10output_pandas.xls'
data_frame = pd.read_excel(input_file, sheet_name=None, index_col=None)
column_output = []
for worksheet_name, data in data_frame.items():
column_output.append(data.loc[:, ['Customer Name', 'Sale Amount']])
selected_columns = pd.concat(column_output, axis=0, ignore_index=True)
writer = pd.ExcelWriter(output_file)
selected_columns.to_excel(writer, sheet_name='selected_colums_all_worksheets', index=False)
print(selected_columns)
writer.save() | [
"you@example.com"
] | you@example.com |
6aa31a025a01a18cada5c8c08fb6207c8f607e7f | 134c429df7d5c4d067d9761cb1435992b048adaf | /notes/0832/0832.py | d70947957aadde34bd55aea7b6a3ae0960180eb1 | [] | no_license | PaulGuo5/Leetcode-notes | 65c6ebb61201d6f16386062e4627291afdf2342d | 431b763bf3019bac7c08619d7ffef37e638940e8 | refs/heads/master | 2021-06-23T09:02:58.143862 | 2021-02-26T01:35:15 | 2021-02-26T01:35:15 | 177,007,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | class Solution:
def flipAndInvertImage(self, A: List[List[int]]) -> List[List[int]]:
def flip(nums):
return nums[::-1]
def invert(nums):
res = []
for n in nums:
res.append(1) if n == 0 else res.append(0)
return res
res = []
for i in A:
res.append(invert(flip(i)))
return res
| [
"zhg26@pitt.edu"
] | zhg26@pitt.edu |
b9cf0b89dc509c98fd99750fda9c99fa168adda7 | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/ashvin/icml2020/hand/pen/demo_bc4.py | 511999e071214ed507d4cac46df3f83f3c391384 | [
"MIT"
] | permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,285 | py | """
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
if __name__ == "__main__":
variant = dict(
num_epochs=10,
num_eval_steps_per_epoch=5000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=256,
replay_buffer_size=int(1E6),
algorithm="SAC",
version="normal",
collection_mode='batch',
layer_size=256,
policy_kwargs=dict(
hidden_sizes=[256, 256],
),
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=True,
bc_num_pretrain_steps=10000,
q_num_pretrain_steps=0,
policy_weight_decay=1e-4,
bc_loss_type="mle",
),
num_exps_per_instance=1,
region='us-west-2',
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_path=["demos/icml2020/hand/pen.npy"],
# demo_off_policy_path=[
# "ashvin/icml2020/hand/door/demo-bc1/run3/video_*.p",
# "ashvin/icml2020/hand/door/demo-bc1/run4/video_*.p",
# "ashvin/icml2020/hand/door/demo-bc1/run5/video_*.p",
# ],
),
logger_variant=dict(
tensorboard=True,
),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
)
search_space = {
'env': ["pen-v0", ],
'seedid': range(3),
'trainer_kwargs.beta': [10, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, run_id=0)
| [
"alexanderkhazatsky@gmail.com"
] | alexanderkhazatsky@gmail.com |
b96a691be9bf43d42cb3803766ef3330a15f8b1f | 2c190caa30c907a7177bfa588bce8c55158e03f7 | /examples/demo_cyma57_numpy.py | 5b9c53f7b384e2c3fae915c0298c0e25c4d42bed | [] | no_license | PythonOptimizers/HSL.py | 4ee86a5bc0f4ce731d5bc704aa4dd1a6870fc2ec | 4456d20a193749e16df9317059903da8888923bd | refs/heads/master | 2020-12-30T21:57:52.617301 | 2017-09-19T08:28:16 | 2017-09-19T08:28:16 | 43,091,560 | 5 | 0 | null | 2017-09-19T08:28:18 | 2015-09-24T20:32:41 | Python | UTF-8 | Python | false | false | 1,074 | py | """Exemple from MA57 spec sheet: http://www.hsl.rl.ac.uk/specs/ma57.pdf."""
import sys
import numpy as np
from hsl.solvers.src._cyma57_numpy_INT32_FLOAT64 import NumpyMA57Solver_INT32_FLOAT64
n = 5
nnz = 7
A = np.array([[2.0, 3.0, 0, 0, 0], [0, 0, 4.0, 0, 6.0], [0, 0, 1, 5, 0],
[0, 0, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=np.float32)
arow = np.array([0, 0, 1, 1, 2, 2, 4], dtype=np.int32)
acol = np.array([0, 1, 2, 4, 2, 3, 4], dtype=np.int32)
aval = np.array([2.0, 3.0, 4.0, 6.0, 1.0, 5.0, 1.0], dtype=np.float64)
rhs = np.array([8, 45, 31, 15, 17], dtype=np.float64)
context = NumpyMA57Solver_INT32_FLOAT64(n, n, nnz)
context.get_matrix_data(arow, acol, aval)
context.analyze()
context.factorize()
print 'Solve:'
x, residual = context.solve(rhs, True)
# x = context.solve(rhs, False)
print ' x:'
print x
print ' residual:'
print residual
print 'Fetch_perm:'
perm = context.fetch_perm()
print ' perm:'
print perm
print 'Refine:'
(new_x, new_res) = context.refine(x, rhs, residual, 5)
print ' new_x: '
print new_x
print ' new_res: '
print new_res
| [
"sylvain.arreckx@gmail.com"
] | sylvain.arreckx@gmail.com |
c79ffaeb263eecefdb9ae48f44034d33f1a0fdf0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03449/s367389051.py | 881c4d5812aa46e6c354fe582596f03e103ac910 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | n=int(input())
a=[[int(i) for i in input().split()] for j in range(2)]
sentou=[]
usiro=[]
for i in range(n):
if i==0:
sentou.append(a[0][0])
usiro.append(a[1][-1])
else:
sentou.append(a[0][i]+sentou[-1])
usiro.append(a[1][-i-1]+usiro[-1])
usiro=usiro[::-1]
ans=[]
for i in range(n):
ans.append(sentou[i]+usiro[i])
print(max(ans)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
95c145c1a6f3e7aab87055b335dd9a17f3903236 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/io_curve_gcode/__init__.py | f13f9f54da7edac8a53529973f63b59945de89a4 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,165 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "CNC Gcode format",
"author": "Hidesato Ikeya (BonBaba)",
"version": (1, 0),
"blender": (2, 6, 2),
"location": "File > Import > CNC Gcode",
"description": "Import CNC Gcode files",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Learnbgame",
}
import bpy
# ImportHelper is a helper class, defines filename and
# invoke() function which calls the file selector.
from bpy_extras.io_utils import ImportHelper
from bpy.props import StringProperty, BoolProperty, EnumProperty
from bpy.types import Operator
class ImportSomeData(Operator, ImportHelper):
"""This appears in the tooltip of the operator and in the generated docs"""
bl_idname = "import_curve.gcode" # important since its how bpy.ops.import_test.some_data is constructed
bl_label = "Import CNC Gcode"
# ImportHelper mixin class uses this
filename_ext = ".gcode"
filter_glob = StringProperty(
default="*.gcode",
options={'HIDDEN'},
)
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
use_setting = BoolProperty(
name="Example Boolean",
description="Example Tooltip",
default=True,
)
type = EnumProperty(
name="Example Enum",
description="Choose between two items",
items=(('OPT_A', "First Option", "Description one"),
('OPT_B', "Second Option", "Description two")),
default='OPT_A',
)
def execute(self, context):
txt = context.blend_data.texts.load(filepath)
return {'FINISHED'}
# Only needed if you want to add into a dynamic menu
def menu_func_import(self, context):
self.layout.operator(ImportSomeData.bl_idname, text="CNC gcode (.gcode)")
def register():
bpy.utils.register_class(ImportSomeData)
bpy.types.INFO_MT_file_import.append(menu_func_import)
def unregister():
bpy.utils.unregister_class(ImportSomeData)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
if __name__ == "__main__":
register()
# test call
bpy.ops.import_test.some_data('INVOKE_DEFAULT')
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
661ba502f10dd7fad41e6347036320a07f9aacb5 | 391a40002b63daff8bb056b2f0b2ae3f7ee52bb3 | /项目/16组_基于YOLO_V3的目标识别系统/YOLO3/Darknet.py | b71c798953d6983c147ca8fcc8524c18774285d1 | [] | no_license | realllcandy/USTC_SSE_Python | 7b40f0e4ae3531fc41683fd19f71a58ce3815cdb | 3ac15a95e8a99491c322481a70c14b6ab830082f | refs/heads/master | 2023-03-17T21:30:19.068695 | 2020-10-10T06:15:28 | 2020-10-10T06:15:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,395 | py | from __future__ import division
from Parse_config import parse_config
from Parse_config import create_modules
from util import *
class Darknet(nn.Module):
def __init__(self, cgfile):
super(Darknet, self).__init__()
self.blocks = parse_config(cgfile)
self.net_info, self.module_list = create_modules(self.blocks)
# 定义前向传播,self.blocks因为的第一个元素self.blocks是一个net不属于正向传递的块。
def forward(self, x, CUDA):
modules = self.blocks[1:]
# 键值对。key为layer的索引,value是特征矩阵(feature map)
outputs = {}
# 写标志为0
write = 0
for i, module in enumerate(modules):
module_type = module['type']
if module_type == 'convolutional' or module_type == 'upsample':
# 如果模块是卷积模块或上采样模块,则这就是正向传递的工作方式。
x = self.module_list[i](x)
elif module_type == 'route':
layers = module['layers']
layers = [int(a) for a in layers]
if layers[0] > 0:
layers[0] = layers[0] - i
if len(layers) == 1:
x = outputs[i + layers[0]]
else:
if (layers[1]):
layers[1] = layers[1] - i
mp1 = outputs[i + layers[0]]
mp2 = outputs[i + layers[1]]
# 在深度上连接,及channels连接,要保证长宽一致
x = torch.cat((mp1, mp2), 1)
# 残差网络
elif module_type == 'shortcut':
from_ = int(module['from'])
x = outputs[i - 1] + outputs[i + from_]
elif module_type == 'yolo':
#获得三个anchors值
anchors = self.module_list[i][0].anchors
# 获得输入维度
input_dim = int(self.net_info['height'])
# 需要检测的物体个数
num_classes = int(module['classes'])
# transform
x = x.data.cuda()
#x的shape(batch_size,channels,长,宽)
#shape torch.Size([1, 255, 13, 13])
#print('prediction.shape',x.shape)
x = predict_transform(x, input_dim, anchors, num_classes, CUDA)
# 第一次yolo检测的时候,因为第二张检测图还没生成,还不能concat
if not write: # if no collector has been intialised.
detections = x
write = 1
else:
detections = torch.cat((detections, x), 1)
outputs[i] = x
# 返回的是三张特征图的连接
return detections
def load_weights(self, weightfile):
# 打开权重文件
fp = open(weightfile, "rb")
# The first 4 values are header information
# 1. Major version number
# 2. Minor Version Number
# 3. Subversion number
# 4. IMages seen
header = np.fromfile(fp, dtype=np.int32, count=5)
self.header = torch.from_numpy(header)
self.seen = self.header[3]
# The rest of the values are the weights
# Let's load them up
weights = np.fromfile(fp, dtype=np.float32)
ptr = 0
for i in range(len(self.module_list)):
module_type = self.blocks[i + 1]["type"]
if module_type == "convolutional":
model = self.module_list[i]
try:
batch_normalize = int(self.blocks[i + 1]["batch_normalize"])
except:
batch_normalize = 0
conv = model[0]
if (batch_normalize):
bn = model[1]
# 获得批量归一化层的参数个数
num_bn_biases = bn.bias.numel()
# 从weights中加载参数
bn_biases = torch.from_numpy(weights[ptr:ptr + num_bn_biases])
ptr += num_bn_biases
bn_weights = torch.from_numpy(weights[ptr: ptr + num_bn_biases])
ptr += num_bn_biases
bn_running_mean = torch.from_numpy(weights[ptr: ptr + num_bn_biases])
ptr += num_bn_biases
bn_running_var = torch.from_numpy(weights[ptr: ptr + num_bn_biases])
ptr += num_bn_biases
# 把权重reshape成模型需要的参数的形状
bn_biases = bn_biases.view_as(bn.bias.data)
bn_weights = bn_weights.view_as(bn.weight.data)
bn_running_mean = bn_running_mean.view_as(bn.running_mean)
bn_running_var = bn_running_var.view_as(bn.running_var)
# 复制参数到模型中去
bn.bias.data.copy_(bn_biases)
bn.weight.data.copy_(bn_weights)
bn.running_mean.copy_(bn_running_mean)
bn.running_var.copy_(bn_running_var)
else:
# 如果没加载成功,获得卷积偏差参数的数量
num_biases = conv.bias.numel()
# 加载权重
conv_biases = torch.from_numpy(weights[ptr: ptr + num_biases])
ptr = ptr + num_biases
# 把权重reshape成模型需要的参数的形状
conv_biases = conv_biases.view_as(conv.bias.data)
# 复制参数到模型中去
conv.bias.data.copy_(conv_biases)
# 最后加载卷积层的参数
num_weights = conv.weight.numel()
# 和上面过程一样
conv_weights = torch.from_numpy(weights[ptr:ptr + num_weights])
ptr = ptr + num_weights
conv_weights = conv_weights.view_as(conv.weight.data)
conv.weight.data.copy_(conv_weights)
# In[7]:
def get_test_input():
img = cv2.imread('./dog-cycle-car.png')
img = cv2.resize(img, (416, 416))
# (3,416,416)
img_ = img.transpose((2, 0, 1))
img_ = img_[np.newaxis, :, :, :] / 255.
img_ = torch.from_numpy(img_).float()
img_ = Variable(img_)
return img_
'''
该张量的形状为1 x 10647 x 85。
第一维是批处理大小,由于我们使用了单个图像,
因此批量大小仅为1。对于批次中的每个图像
我们都有一个10647 x 85的表格。
每个表的行都表示一个边界框。
(4个bbox属性,1个客观分数和80个分类的分数)
'''
# model = Darknet('cfg/yolov3.cfg')
# inp = get_test_input()
# pred = model(inp,torch.cuda.is_available())
# print(pred.shape)
# In[ ]:
# In[9]:
# model = Darknet('cfg/yolov3.cfg')
# model.load_weights('cfg/yolov3.weights')
# In[ ]:
# In[11]:
# def write_results(prediction,confidence,num_classes,nms_conf=0.4):
# #对于prediction有B*10647个边界框,如果object检测预测值小于confidence
# #则忽略
# #在prediction第二维加入一维,代表conf_mask
# conf_mask = (prediction[:,:,2]>confidence).float().unsqueeze(2)
# prediction = prediction*conf_mask
#
#
# box_corner = prediction.new(prediction.shape)
# box_corner[:, :, 0] = (prediction[:, :, 0] - prediction[:, :, 2] / 2)
# box_corner[:, :, 1] = (prediction[:, :, 1] - prediction[:, :, 3] / 2)
# box_corner[:, :, 2] = (prediction[:, :, 0] + prediction[:, :, 2] / 2)
# box_corner[:, :, 3] = (prediction[:, :, 1] + prediction[:, :, 3] / 2)
# prediction[:, :, :4] = box_corner[:, :, :4]
#
# batch_size = prediction.size(0)
#
# write = False
#
# for ind in range(batch_size):
# image_pred = prediction[ind] # image Tensor
# # confidence threshholding
# # 执行非极大值抑制
# max_conf, max_conf_score = torch.max(image_pred[:, 5:5 + num_classes], 1)
# max_conf = max_conf.float().unsqueeze(1)
# max_conf_score = max_conf_score.float().unsqueeze(1)
# seq = (image_pred[:, :5], max_conf, max_conf_score)
# image_pred = torch.cat(seq, 1)
# # In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| [
"321699849@qq.com"
] | 321699849@qq.com |
0764433513ccd090a92cb26093a3447152ee3e64 | 27f51d85ca25d4d0db9b26cb8b932b281e041f65 | /cw/object_hierarchies/from_table.py | fb3df612be15f4b352270f8b30bb5cefa721fd2a | [
"MIT"
] | permissive | aarondewindt/cw | 2bbb9b66b0e8e02055208a71ef92e0fa97c0d589 | 6be9770da0f0fc34ea47d7ab83e6929c4823e98a | refs/heads/master | 2023-05-28T04:59:24.099091 | 2021-03-30T20:55:31 | 2021-03-30T20:55:31 | 170,211,817 | 2 | 0 | MIT | 2021-11-01T13:55:23 | 2019-02-11T22:19:11 | Python | UTF-8 | Python | false | false | 4,958 | py | import re
from pathlib import PurePosixPath
from itertools import groupby, islice
import numpy as np
from typing import Dict, Union, Any
from cw.tree_node import TreeNode
# Regular expressions matching the naming scheme of ndarrays.
ndarray_re = re.compile(r"^\s*(\w+?)((?:_\d+)+)\s*$")
def tables_to_object_hierarchy(tables):
"""
Transforms a dictionary of tables into a object hierarchy.
List elements with the same name ending with trailing integers separated
by underscores (example `name_0_1`) are combined into a single
:class:`numpy.ndarray` with n+1 dimensions, where n is the number of trailing
integers. The first dimension's size is the same as the length of the lists.
The trailing integers then define the index of the higher dimensions where
the data will be inserted to.
Elements have be grouped in sub-dictionaries. If the key of the dictionary
is a string it will be appended to the front of the child elements name.
It can thus be used to define namespaces. Otherwise the key is ignored.
Example
.. code-block:: python
inp = {
4: {
'foo.quux_0_0': np.array([9, 11, 14, 17]),
'foo.quux_1_0': np.array([10, 12, 15, 18]),
'foo.quux_2_0': np.array([11, 13, 16, 19])
},
"bar": {
"bas": 123
}
}
{
'foo': {
'quux': np.array([[[9.], [10.], [11.]],
[[11.], [12.], [13.]],
[[14.], [15.], [16.]],
[[17.], [18.], [19.]]])
},
"bar": {
"bas": 123
}
}
:param tables: Dictionary containing dictionaries whose key is the path
to the element in the resulting object hierarchy.
:return: Object hierarchy
"""
data_tables = flatten_tables(tables)
process_ndarrays(data_tables)
root_node = TreeNode.from_path_value_pairs(data_tables)
root_obj = root_node.object_hierarchy()
return root_obj
def flatten_tables(tables: Dict[Union[int, str], Dict[str, Any]]):
"""
Returns a list containing tuples with two elements, the first being a
:class:`pathlib.PurePosixPath` with the path to the value in the final
object hierarchy and second one being the value. Namespaces are resolved
:param tables:
:return:
"""
flat_tables = {}
for namespace, local_tables in tables.items():
# If the namespace is not a string, the element is placed on the root namespace.
# Everything behind the hashtag is a comment.
namespace = namespace.split("#")[0].strip() if isinstance(namespace, str) else ""
for node_name, node_value in local_tables.items():
path = PurePosixPath(namespace, *node_name.split("."))
flat_tables[path] = node_value
return flat_tables
def process_ndarrays(tables):
# It is not possible to make the changes in the table inside of the
# main loop because it's not possible to change the length of a
# dictionary while iterating through it.
table_changes = []
for path, group in find_ndarrays(tables):
# This list will contain the changes that are needed to be made
# in the table.
change = [path, [], None]
# Find shape of ndarray
# Initializes the size as 0 for all dimensions.
# Iterates through all of the elements in the array to look for
# the largest index and sets the size to the index plus 1.
shape = [0] * len(group[0][1])
for _, idx in group:
for i, size in enumerate(idx):
size += 1
if size > shape[i]:
shape[i] = size
# The first dimension of the final array should have the same
# size as the length of the columns.
shape = (len(tables[group[0][0]]), *shape)
# Initialize new ndarray
array = np.empty(shape)
# Copy the data of the old column and put it in the new array.
for col_path, idx in group:
array[(slice(None), *idx)] = np.array(tables[col_path])
change[1].append(col_path)
change[2] = array
table_changes.append(change)
# Apply the changes to the table.
for path, old_paths, value in table_changes:
for old_path in old_paths:
del tables[old_path]
tables[path] = value
def find_ndarrays(tables):
def ndarray_cols():
for path, value in tables.items():
match = ndarray_re.match(path.name)
if match:
yield path.with_name(match.group(1)), path, \
tuple(map(int, islice(match.group(2).split("_"), 1, None)))
for k, g in groupby(ndarray_cols(), lambda x: x[0]):
group = []
for e in g:
group.append((e[1], e[2]))
yield k, group
| [
"aaron.dewindt@gmail.com"
] | aaron.dewindt@gmail.com |
621d6245168b41a79ddcfce479071ba64f10ef49 | 55ae369a3ef1593ff31a76847deb2a0d33898895 | /mango/orders.py | 8a9fbb9c6a72ceef5ddc22f99dddc06629661958 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Investin-pro/mango-explorer | 63afb2ad4fb272f5640d18d3df367a6877b3a99a | 4760bd5f9d7067e24c12941d3d7d113b1a7173ef | refs/heads/master | 2023-07-31T23:23:00.590654 | 2021-10-01T17:13:18 | 2021-10-01T17:13:18 | 402,579,362 | 1 | 3 | MIT | 2021-10-02T16:31:43 | 2021-09-02T22:31:31 | Python | UTF-8 | Python | false | false | 6,145 | py | # # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import enum
import pyserum.enums
import typing
from decimal import Decimal
from pyserum.market.types import Order as PySerumOrder
from solana.publickey import PublicKey
from .constants import SYSTEM_PROGRAM_ADDRESS
# # 🥭 Orders
#
# This file holds some basic common orders data types.
#
# # 🥭 Side enum
#
# Is an order a Buy or a Sell?
#
class Side(enum.Enum):
# We use strings here so that argparse can work with these as parameters.
BUY = "BUY"
SELL = "SELL"
@staticmethod
def from_value(value: pyserum.enums.Side) -> "Side":
converted: pyserum.enums.Side = pyserum.enums.Side(int(value))
return Side.BUY if converted == pyserum.enums.Side.BUY else Side.SELL
def to_serum(self) -> pyserum.enums.Side:
return pyserum.enums.Side.BUY if self == Side.BUY else pyserum.enums.Side.SELL
def __str__(self) -> str:
return self.value
def __repr__(self) -> str:
return f"{self}"
# # 🥭 OrderType enum
#
# 3 order types are supported: Limit (most common), IOC (immediate or cancel - not placed on the order book
# so if it doesn't get filled immediately it is cancelled), and Post Only (only ever places orders on the
# orderbook - if this would be filled immediately without being placed on the order book it is cancelled).
#
class OrderType(enum.Enum):
# We use strings here so that argparse can work with these as parameters.
UNKNOWN = "UNKNOWN"
LIMIT = "LIMIT"
IOC = "IOC"
POST_ONLY = "POST_ONLY"
@staticmethod
def from_value(value: Decimal) -> "OrderType":
converted: pyserum.enums.OrderType = pyserum.enums.OrderType(int(value))
if converted == pyserum.enums.OrderType.IOC:
return OrderType.IOC
elif converted == pyserum.enums.OrderType.POST_ONLY:
return OrderType.POST_ONLY
elif converted == pyserum.enums.OrderType.LIMIT:
return OrderType.LIMIT
return OrderType.UNKNOWN
def to_serum(self) -> pyserum.enums.OrderType:
if self == OrderType.IOC:
return pyserum.enums.OrderType.IOC
elif self == OrderType.POST_ONLY:
return pyserum.enums.OrderType.POST_ONLY
else:
return pyserum.enums.OrderType.LIMIT
def __str__(self) -> str:
return self.value
def __repr__(self) -> str:
return f"{self}"
# # 🥭 Order named tuple
#
# A package that encapsulates common information about an order.
#
class Order(typing.NamedTuple):
id: int
client_id: int
owner: PublicKey
side: Side
price: Decimal
quantity: Decimal
order_type: OrderType
# Returns an identical order with the ID changed.
def with_id(self, id: int) -> "Order":
return Order(id=id, side=self.side, price=self.price, quantity=self.quantity,
client_id=self.client_id, owner=self.owner, order_type=self.order_type)
# Returns an identical order with the Client ID changed.
def with_client_id(self, client_id: int) -> "Order":
return Order(id=self.id, side=self.side, price=self.price, quantity=self.quantity,
client_id=client_id, owner=self.owner, order_type=self.order_type)
# Returns an identical order with the price changed.
def with_price(self, price: Decimal) -> "Order":
return Order(id=self.id, side=self.side, price=price, quantity=self.quantity,
client_id=self.client_id, owner=self.owner, order_type=self.order_type)
# Returns an identical order with the quantity changed.
def with_quantity(self, quantity: Decimal) -> "Order":
return Order(id=self.id, side=self.side, price=self.price, quantity=quantity,
client_id=self.client_id, owner=self.owner, order_type=self.order_type)
@staticmethod
def from_serum_order(serum_order: PySerumOrder) -> "Order":
price = Decimal(serum_order.info.price)
quantity = Decimal(serum_order.info.size)
side = Side.from_value(serum_order.side)
order = Order(id=serum_order.order_id, side=side, price=price, quantity=quantity,
client_id=serum_order.client_id, owner=serum_order.open_order_address,
order_type=OrderType.UNKNOWN)
return order
@staticmethod
def from_basic_info(side: Side, price: Decimal, quantity: Decimal, order_type: OrderType = OrderType.UNKNOWN) -> "Order":
order = Order(id=0, side=side, price=price, quantity=quantity, client_id=0,
owner=SYSTEM_PROGRAM_ADDRESS, order_type=order_type)
return order
@staticmethod
def from_ids(id: int, client_id: int, side: Side = Side.BUY) -> "Order":
return Order(id=id, client_id=client_id, owner=SYSTEM_PROGRAM_ADDRESS, side=side, price=Decimal(0), quantity=Decimal(0), order_type=OrderType.UNKNOWN)
def __str__(self) -> str:
owner: str = ""
if self.owner != SYSTEM_PROGRAM_ADDRESS:
owner = f"[{self.owner}] "
order_type: str = ""
if self.order_type != OrderType.UNKNOWN:
order_type = f" {self.order_type}"
return f"« 𝙾𝚛𝚍𝚎𝚛 {owner}{self.side} for {self.quantity:,.8f} at {self.price:.8f} [ID: {self.id} / {self.client_id}]{order_type} »"
def __repr__(self) -> str:
return f"{self}"
| [
"geoff@knife.opgeek.lan"
] | geoff@knife.opgeek.lan |
1a7f49d252262cf4f5342dc5526ae7df3940ed69 | f0b90daf26aa393ef260aeee0ac8a9e767378b09 | /icsbep/pu-sol-therm-004/openmc/case-10/generate_materials.py | 408eb9f841a5423f4deff32ac3b0262d125f7f73 | [] | no_license | ragusa/benchmarks | 4567e8567bd00def7ea4918636c3f300eb9ce10b | 41b2e4424129cbfd0664fd5877e0476b16dcba67 | refs/heads/master | 2020-03-23T09:05:21.329997 | 2018-05-24T18:13:19 | 2018-05-24T18:13:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | import openmc
mats = openmc.Materials()
mat = openmc.Material(1)
mat.name = "Plutonium nitrate solution"
mat.set_density('sum')
mat.add_nuclide('Pu239', 8.6298e-05)
mat.add_nuclide('Pu240', 2.7676e-06)
mat.add_nuclide('N14', 3.0060e-03)
mat.add_nuclide('H1', 5.9494e-02)
mat.add_nuclide('O16', 3.7440e-02)
mat.add_element('Fe', 1.5636e-06)
mats.append(mat)
mat = openmc.Material(2)
mat.name = "347 stainless steel"
mat.set_density('sum')
mat.add_element('Fe', 6.0386e-02)
mat.add_element('Cr', 1.6678e-02)
mat.add_element('Ni', 9.8504e-03)
mats.append(mat)
mat = openmc.Material(3)
mat.name = "Water at 27 C"
mat.set_density('sum')
mat.add_nuclide('H1', 6.6622e-02)
mat.add_nuclide('O16', 3.3311e-02)
mats.append(mat)
mats.export_to_xml()
| [
"paul.k.romano@gmail.com"
] | paul.k.romano@gmail.com |
d99ab296471734666c928b93c47adb43e1b01cb4 | 11ca0c393c854fa7212e783a34269f9dae84e8c7 | /Python/53. 最大子序和.py | 5aef6a8e42fedeb4357acd5a50c412e178cc55d7 | [] | no_license | VictoriqueCQ/LeetCode | dc84d81163eed26fa9dbc2114bba0b5c2ea881f4 | a77b3ead157f97f5d9599badb4d4c5da69de44ba | refs/heads/master | 2021-06-05T06:40:24.659909 | 2021-03-31T08:31:51 | 2021-03-31T08:31:51 | 97,978,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | class Solution:
def maxSubArray(self, nums: List[int]) -> int:
size = len(nums)
f = [0]*size
for i in range(size):
f[i] = max(f[i-1]+nums[i],nums[i]) if i else nums[0]
return max(f)
# for i in range(1, len(nums)):
# nums[i] = max(nums[i - 1] + nums[i], nums[i])
# return max(nums)
| [
"1997Victorique0317"
] | 1997Victorique0317 |
760ca32687f45e85ad5d8ef044fe9dee3efb3806 | 5deea8f084b99e3636f2ebd6a78dc8bb0e7be719 | /app/lib/overrides/common/__init__.py | 7da85458dd259d282e8c317c9d2246a923bde09d | [
"Apache-2.0"
] | permissive | cisco-sso/mh2-poc | f39c3b78e85a7504bf97e2d1d48f03754428146e | a476c9e119e2e15834dc04242504049b9fc7e8e5 | refs/heads/master | 2020-04-25T06:08:35.199462 | 2019-02-25T20:36:16 | 2019-02-25T20:36:16 | 172,569,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | # from cert_manager_configs import cert_manager_configs
# from cert_manager import cert_manager
# from dex import dex
# from elasticsearch_curator import elasticsearch_curator
# from elasticsearch_exporter import elasticsearch_exporter
# from elasticsearch import elasticsearch
# from external_dns import external_dns
# from filebeat import filebeat
# from gangway import gangway
# from grafana import grafana
# from kibana import kibana
# from oauth2_proxy import oauth2_proxy
# from prometheus import prometheus
# from raw_cluster_role_bindings import raw_cluster_role_bindings
# from raw_limit_ranges import raw_limit_ranges
# from raw_oauth2_proxy_accesslist import raw_oauth2_proxy_accesslist
# from raw_priority_classes import raw_priority_classes
# try:
# __all__
# except:
# pass
# else:
# _module_type = type(__import__('sys'))
# for _sym, _val in sorted(locals().items()):
# if not _sym.startswith('_') and not isinstance(_val, _module_type) :
# __all__.append(_sym)
# del(_sym)
# del(_val)
# del(_module_type)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
73135e18349335c1ec2ce511946a5b1caf7c49e5 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/FiltersAndTransformers/Scripts/URLDecode/URLDecode_test.py | 50aa5184251ed807f2800c680873a5ffa6aac5b3 | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 551 | py | from CommonServerPython import *
from URLDecode import main
import pytest
@pytest.mark.parametrize("url,res", [
    ('https:%2F%2Fexample.com', 'https://example.com'),
    ('https://example.com/?test%20this', 'https://example.com/?test this'),
])
def test_main(mocker, url, res):
    """main() should URL-decode the 'value' argument and report the result."""
    # Feed the encoded URL in through demisto.args and capture demisto.results.
    mocker.patch.object(demisto, 'args', return_value={'value': url})
    mocker.patch.object(demisto, 'results')
    main()
    # Positional args of the (single) demisto.results call.
    results = demisto.results.call_args[0]
    assert results[0]['HumanReadable'] == res
    assert results[0]['Contents']['DecodedURL'] == res
"noreply@github.com"
] | demisto.noreply@github.com |
5bdbb837a600b26052c3b243e2f3de71d343a724 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17r_2_00/interface/ethernet/fcoeport/__init__.py | f6cf5f2ddb01cd7fac894c1896592c41c6b81bb2 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,230 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class fcoeport(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-interface - based on the path /interface/ethernet/fcoeport. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: This provides the grouping for FCoE configuration
elements on a port.
  """
  # NOTE: generated pyangbind container — regenerate from the YANG model
  # rather than editing this file by hand.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__fcoeport_map',)
  _yang_name = 'fcoeport'
  _rest_name = 'fcoeport'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    """Standard pyangbind constructor.

    Accepts the optional kwargs ``path_helper``, ``extmethods`` and ``load``,
    and optionally one positional same-shaped object whose changed
    attributes are copied into this instance.
    """
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      # Inherit the helper from the parent container when not supplied.
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      # Inherit extmethods from the parent container when not supplied.
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    self.__fcoeport_map = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..32']}), is_leaf=True, yang_name="fcoeport-map", rest_name="map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'map'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='fcoe-map-name-type', is_config=True)
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      # Copy only attributes that the source object has actually changed.
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # YANG data path of this container: parent path + own name when nested.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'interface', u'ethernet', u'fcoeport']
  def _rest_path(self):
    # REST path; a container with an empty rest_name is skipped in the path.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'interface', u'Ethernet', u'fcoeport']
  def _get_fcoeport_map(self):
    """
    Getter method for fcoeport_map, mapped from YANG variable /interface/ethernet/fcoeport/fcoeport_map (fcoe-map-name-type)
    YANG Description: This specifies the name of the FCoE map.
    """
    return self.__fcoeport_map
  def _set_fcoeport_map(self, v, load=False):
    """
    Setter method for fcoeport_map, mapped from YANG variable /interface/ethernet/fcoeport/fcoeport_map (fcoe-map-name-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_fcoeport_map is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_fcoeport_map() directly.
    YANG Description: This specifies the name of the FCoE map.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Coerce the supplied value into the generated restricted type.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..32']}), is_leaf=True, yang_name="fcoeport-map", rest_name="map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'map'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='fcoe-map-name-type', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """fcoeport_map must be of a type compatible with fcoe-map-name-type""",
          'defined-type': "brocade-fcoe:fcoe-map-name-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..32']}), is_leaf=True, yang_name="fcoeport-map", rest_name="map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'map'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='fcoe-map-name-type', is_config=True)""",
        })
    self.__fcoeport_map = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_fcoeport_map(self):
    # Reset the leaf back to a fresh (empty) generated value.
    self.__fcoeport_map = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..32']}), is_leaf=True, yang_name="fcoeport-map", rest_name="map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'alt-name': u'map'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='fcoe-map-name-type', is_config=True)
  fcoeport_map = __builtin__.property(_get_fcoeport_map, _set_fcoeport_map)
  _pyangbind_elements = {'fcoeport_map': fcoeport_map, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
a64489a63abd33ac5d7d969dc5e56a03c88fef72 | e174e13114fe96ad2a4eeb596a3d1c564ae212a8 | /Python for Finance Analyze Big Financial Data by Y. Hilpisch/Code of Python For Finance/4375OS_08_Code/4375OS_08_26_anderson_normality_test.py | eca79670b5fde3d4f04bfed103f040e548c02246 | [] | no_license | Kevinqian0501/python_books | c1a7632d66dceb46db439f7cbed86d85370aab42 | 0691e4685af03a296aafb02447e3585db55ce461 | refs/heads/master | 2021-08-30T19:27:03.985464 | 2017-12-19T05:56:31 | 2017-12-19T05:56:31 | 104,145,012 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | """
Name : 4375OS_08_26_Anderson_normality_test.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/26/2013
email : yany@canisius.edu
paulyxy@hotmail.com
"""
from matplotlib.finance import quotes_historical_yahoo
from scipy import stats
# NOTE(review): matplotlib.finance and the Yahoo quotes endpoint are long
# deprecated/defunct — this script is unlikely to run on current libraries;
# confirm before reuse.
ticker='IBM'
begdate=(2013,1,1)
enddate=(2013,11,9)
# Fetch adjusted daily quotes as a record array.
x = quotes_historical_yahoo(ticker, begdate, enddate,asobject=True,adjusted=True)
# Daily simple returns from adjusted closes.
ret= (x.aclose[1:]-x.aclose[:-1])/x.aclose[:-1]
# Anderson-Darling test of the returns against a normal distribution.
print(stats.anderson(ret, dist='norm'))
"kevin@Qians-MacBook-Pro.local"
] | kevin@Qians-MacBook-Pro.local |
aaecc346793adb3049b2cd3aaff6187664b8d483 | 47c39800fa6f928e0d13f26727ba52bda2aa6ff0 | /venv/Lib/site-packages/aliyunsdkrds/request/v20140815/DeleteHostAccountRequest.py | 2cbe5b940b9cfadae26e1e2f2b569c42e82b02a3 | [
"MIT"
] | permissive | dddluke/zhihuipingtai | 952ed5f9a4011cb4fb2765a0571c978af784d708 | 4e46e01440f8c270c05259ac0f38bd56dd04016c | refs/heads/master | 2023-03-09T03:32:47.807760 | 2021-02-26T02:36:10 | 2021-02-26T02:36:10 | 341,816,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,363 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class DeleteHostAccountRequest(RpcRequest):
    """RPC request wrapper for the RDS ``DeleteHostAccount`` API (2014-08-15).

    Each supported query parameter gets a getter/setter pair; values are
    stored on the underlying ``RpcRequest`` via ``add_query_param``.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Rds', '2014-08-15', 'DeleteHostAccount','rds')
        self.set_method('POST')
        # Wire up regional endpoint resolution when the core SDK supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ResourceOwnerId(self):
        params = self.get_query_params()
        return params.get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_ResourceOwnerAccount(self):
        params = self.get_query_params()
        return params.get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_ClientToken(self):
        params = self.get_query_params()
        return params.get('ClientToken')

    def set_ClientToken(self, ClientToken):
        self.add_query_param('ClientToken', ClientToken)

    def get_OwnerId(self):
        params = self.get_query_params()
        return params.get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_AccountName(self):
        params = self.get_query_params()
        return params.get('AccountName')

    def set_AccountName(self, AccountName):
        self.add_query_param('AccountName', AccountName)

    def get_DBInstanceId(self):
        params = self.get_query_params()
        return params.get('DBInstanceId')

    def set_DBInstanceId(self, DBInstanceId):
        self.add_query_param('DBInstanceId', DBInstanceId)
"lukeli0306@gmail.com"
] | lukeli0306@gmail.com |
02af74f4a29c6196e2b244897585c90b4de5ef4b | 3319aeddfb292f8ab2602840bf0c1e0c2e5927be | /python/prime_fork.py | cedd0bf463d8c6abe68980d31cbe32a85f97c67b | [] | no_license | slaash/scripts | 4cc3eeab37f55d822b59210b8957295596256936 | 482fb710c9e9bcac050384fb5f651baf3c717dac | refs/heads/master | 2023-07-09T12:04:44.696222 | 2023-07-08T12:23:54 | 2023-07-08T12:23:54 | 983,247 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | #!/usr/bin/python3
import math
import sys
import os
import time
import resource
# Inclusive range [min, max] of candidates to scan, taken from the CLI.
# NOTE(review): these names shadow the builtins min() and max() in this script.
min=int(sys.argv[1])
max=int(sys.argv[2])
def is_prime(n):
    """Trial-division primality check with a side effect.

    If n is prime, print "<pid> returned: <n>" (tagged with this process's
    PID — the caller runs this in a forked child) and return 0; otherwise
    return None.

    Fix: values below 2 (0 and 1) were previously reported as prime because
    the trial-division loop never executes for them; they are now rejected.
    """
    if n < 2:
        return None
    for divisor in range(2, int(math.sqrt(n)) + 1):
        if n % divisor == 0:
            return None
    print(str(os.getpid()) + " returned: " + str(n))
    return 0
# Fork one child per candidate, keeping at most `parallel` children alive;
# each child tests a single number and exits.
parallel=10
runners=[]  # PIDs of children not yet reaped
for i in range(min,max+1):
    pid=os.fork()
    if pid==0:
        # Child process: test one candidate, then terminate.
        is_prime(i)
        exit()
    else:
        # Parent: track the child; once the pool is full, reap one before
        # forking the next.
        runners.append(pid)
        if len(runners)>=parallel:
            pid,code=os.wait()
            # print("PID "+str(pid)+" exited with code "+str(code))
            runners.remove(pid)
# Reap the remaining children so none are left as zombies.
#print("Now we wait for all children to exit..."+str(len(runners))+" bitches left!")
for child in runners:
    pid,code=os.waitpid(child,0)
    # print("PID "+str(pid)+" exited with code "+str(code))
#print("...Done\n")
# Peak resident set size of this (parent) process, reported in kB on Linux.
print("Max RSS: "+str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)+" kB")
| [
"rmoisa@yahoo.com"
] | rmoisa@yahoo.com |
15b0bb70da619df0472a6257e3e59dfa490506c9 | a1192c09f1fd18f75278a4fea6daa0d4e9ebc9d0 | /src/oscar/apps/promotions/admin.py | ed306c224e52a1de921e8f59c57183a43a41cfcb | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | abirafdirp/django-oscar-jne | d201439569a64720622aaa8dd84ce248981eedd4 | 70272b842316e8df57b0bc8a0dc669c3af4ec8f9 | refs/heads/master | 2020-04-15T07:16:15.086429 | 2016-03-12T13:11:52 | 2016-03-12T13:11:52 | 50,660,159 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | from django.contrib import admin
from oscar.apps.promotions.models import Image, MultiImage, RawHTML, \
HandPickedProductList, OrderedProduct, AutomaticProductList, TabbedBlock, \
PagePromotion, KeywordPromotion, SingleProduct
class OrderProductInline(admin.TabularInline):
    """Tabular inline admin rows for OrderedProduct entries."""
    model = OrderedProduct
class HandPickedProductListAdmin(admin.ModelAdmin):
    """Admin for hand-picked product lists; member products edited inline."""
    inlines = [OrderProductInline]
class PagePromotionAdmin(admin.ModelAdmin):
    """Admin for page promotions; the clicks counter is kept off the edit form."""
    list_display = ['page_url', 'content_object', 'position']
    exclude = ['clicks']
class KeywordPromotionAdmin(admin.ModelAdmin):
    """Admin for keyword promotions; clicks are visible but read-only."""
    list_display = ['keyword', 'position', 'clicks']
    readonly_fields = ['clicks']
# Register the promotion models; three of them use the customised admin
# classes defined in this module, the rest take the default ModelAdmin.
admin.site.register(Image)
admin.site.register(MultiImage)
admin.site.register(RawHTML)
admin.site.register(HandPickedProductList, HandPickedProductListAdmin)
admin.site.register(AutomaticProductList)
admin.site.register(TabbedBlock)
admin.site.register(PagePromotion, PagePromotionAdmin)
admin.site.register(KeywordPromotion, KeywordPromotionAdmin)
admin.site.register(SingleProduct)
| [
"abirafdiraditya@gmail.com"
] | abirafdiraditya@gmail.com |
e379d00a9c82b1968e6e39de2261cb910b0b96a0 | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /xlsxwriter/test/comparison/test_chart_format15.py | 0b9db557714fd2ec39b200bf43bdee7b34b5d62c | [
"BSD-2-Clause-Views"
] | permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 1,493 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        # Reference workbook name; set_filename also provides self.got_filename.
        self.set_filename('chart_format15.xlsx')
    def test_create_file(self):
        """Test the creation of an XlsxWriter file with chart formatting."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'line'})
        # Pin the axis ids, presumably so the output matches the stored
        # Excel reference file exactly.
        chart.axis_ids = [42401792, 42403712]
        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])
        # First series carries a linear trendline.
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
            'trendline': {'type': 'linear'},
        })
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
        })
        # Remove entries (series indexes 2 and 0) from the chart legend.
        chart.set_legend({'delete_series': [2, 0]})
        worksheet.insert_chart('E9', chart)
        workbook.close()
        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
b14438ae29d90a802dd789e9b52ddc9f2f162bd6 | 3b93339109b69d1da43bbfbc17d40700594828a5 | /tests/test_rankbm25.py | 9cacb61b590d390820b308def6796e0e08c9ca11 | [
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | shibing624/text2vec | b6fda0958eddc436c936e25c1d373ab368c4e1f7 | c761ffc17b2eca0eb3795169f4d6acd4573c8f20 | refs/heads/master | 2023-07-20T01:38:21.139689 | 2023-07-17T08:31:19 | 2023-07-17T08:31:19 | 221,121,871 | 2,882 | 286 | Apache-2.0 | 2023-08-07T04:16:13 | 2019-11-12T03:27:57 | Python | UTF-8 | Python | false | false | 1,785 | py | # -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
import sys
import unittest
sys.path.append('..')
from text2vec.utils.rank_bm25 import BM25Okapi
from text2vec.utils.tokenizer import segment
class RankTestCase(unittest.TestCase):
    def test_en_topn(self):
        """BM25 scoring and top-n retrieval on an English corpus."""
        corpus = [
            "Hello there good man!",
            "It is quite windy in London",
            "How is the weather today?"
        ]
        # Whitespace tokenisation is sufficient for English text.
        tokenized_corpus = [doc.split(" ") for doc in corpus]
        bm25 = BM25Okapi(tokenized_corpus)
        query = "windy London"
        tokenized_query = query.split(" ")
        doc_scores = bm25.get_scores(tokenized_query)
        print(doc_scores)
        # Only the second document shares terms with the query.
        self.assertTrue(' '.join(["{:.3f}".format(i) for i in doc_scores]) == "0.000 0.937 0.000")
        a = bm25.get_top_n(tokenized_query, corpus, n=2)
        print(a)
        self.assertEqual(a, ['It is quite windy in London', 'How is the weather today?'])
    def test_zh_topn(self):
        """BM25 scoring and top-n retrieval on a Chinese corpus."""
        corpus = ['女网红能火的只是一小部分', '当下最火的男明星为鹿晗', "How is the weather today?", "你觉得哪个女的明星最红?"]
        # Chinese has no whitespace word boundaries, so use the segmenter.
        tokenized_corpus = [segment(doc) for doc in corpus]
        bm25 = BM25Okapi(tokenized_corpus)
        query = '当下最火的女的明星是谁?'
        tokenized_query = segment(query)
        doc_scores = bm25.get_scores(tokenized_query)
        print(doc_scores)
        a = bm25.get_top_n(tokenized_query, corpus, n=3)
        print(a)
        self.assertEqual(a, ['你觉得哪个女的明星最红?', '当下最火的男明星为鹿晗', '女网红能火的只是一小部分'])
self.assertEqual(a, ['你觉得哪个女的明星最红?', '当下最火的男明星为鹿晗', '女网红能火的只是一小部分'])
# Allow running this test module directly: `python test_rankbm25.py`.
if __name__ == '__main__':
    unittest.main()
| [
"shibing624@126.com"
] | shibing624@126.com |
e32c2d5ab8e528c7c4ac977dfc5613a162234b2c | 8bdce915174678a90a6be811ea91b50930b9d26a | /elk/shares/analysis/new_stock_change_analysis.py | b5aa28cc1f4229488c7278d64809ae804e59118e | [] | no_license | CharlesBird/Resources | daefffef8fb3735e656cd0a3bf400d5e2ff85cc0 | 517ac7b7992a686fa5370b6fda8b62663735853c | refs/heads/master | 2022-12-15T02:54:56.530940 | 2020-02-29T14:33:43 | 2020-02-29T14:33:43 | 109,668,108 | 1 | 1 | null | 2022-12-08T05:04:25 | 2017-11-06T08:34:30 | Jupyter Notebook | UTF-8 | Python | false | false | 498 | py | from elasticsearch import Elasticsearch
import tushare as ts
from datetime import datetime
from pprint import pprint
# SECURITY NOTE(review): the Elasticsearch host and the tushare API token
# below are hard-coded in source control; they should be moved to
# configuration or environment variables and the token rotated.
es = Elasticsearch(['47.103.32.102:9200'])
TOKEN = '137e3fc78e901b8463d68a102b168b2ea0217cb854abfad24d4dc7f7'
pro = ts.pro_api(TOKEN)
# All listed stocks with their ts_code and listing date.
sh_list_datas = pro.stock_basic(exchange='', list_status='', fields='ts_code, list_date')
stocks = sh_list_datas.to_dict('records')
for stock in stocks:
    # list_date is 'yyyymmdd'; the first six characters are year+month.
    yy = stock['list_date'][:6]
    # print(yy)
    if yy == '201908':
        print(stock)
"1016784928@qq.com"
] | 1016784928@qq.com |
2d3fa65a4864ae40e51ab272bbe29ffff39d68d3 | 9d994684359b96ded45c64281b5feca8db58dad5 | /Python/800 - IV/551A.py | aa9c8b75dec28681cac6add03a2c787cc6a2022d | [] | no_license | KuroKousuii/Codeforces | 7f38c67914ff6a1adb2d325d9f316e5fd122e463 | 80f5d46e2c7364ba52184bbf567e012235940be4 | refs/heads/main | 2023-07-17T14:24:41.713893 | 2021-09-05T18:51:39 | 2021-09-05T18:51:39 | 376,141,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | x = int(input())
ans = 1  # NOTE(review): never used below — candidate for removal
arr = [*map(int, input().split())]
# For each element: rank = 1 + count of strictly larger elements (O(x^2)).
for i in range(x):
    start = 1
    for j in range(x):
        if arr[i] < arr[j]:
            start += 1
    print(start, end=" ")
print()
"84732337+KuroKousuii@users.noreply.github.com"
] | 84732337+KuroKousuii@users.noreply.github.com |
b4a3e0119573cbd3366666cd99c31bbb348732f2 | 7d4e8492de331f8bed4ef625132a3c8bb1e44b3e | /src/helpers/data_load_helper.py | b6d687dc6e8be8b27abc2c9a0e75f42628769342 | [
"ISC"
] | permissive | uk-gov-mirror/dwp.dataworks-behavioural-framework | f6d1d7a94a2b18be659acd444ae8d88615e4a162 | d7c143c0fc0c4ae9e86ece34ccc1a480df1f65ad | refs/heads/master | 2023-04-09T01:09:37.313078 | 2021-04-14T15:43:44 | 2021-04-14T15:43:44 | 356,707,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,797 | py | import os
import datetime
from helpers import console_printer, template_helper
def generate_arguments_for_historic_data_load(
    correlation_id,
    topics,
    s3_base_prefix,
    s3_suffix,
    default_topic_list,
    skip_earlier_than,
    skip_later_than,
):
    """Build the space-separated argument string for a historic data load run.

    Keyword arguments:
    correlation_id -- unique id for this test run
    topics -- comma delimited list of topics, or "ALL" to use the default list
    s3_base_prefix -- the s3 location in the bucket to load files from
    s3_suffix -- comma delimited list of suffixes added to the prefix, or None
    default_topic_list -- comma delimited list used when topics is "ALL"
    skip_earlier_than -- `yyyy-MM-dd'T'HH:mm:ss.SSS` with optional trailing `Z`, or None
    skip_later_than -- `yyyy-MM-dd'T'HH:mm:ss.SSS` with optional trailing `Z`, or None
    """
    if not s3_suffix:
        s3_full_prefix = s3_base_prefix
    elif "," in s3_suffix:
        # Expand each suffix against the base prefix, keeping comma separation.
        expanded = [
            os.path.join(s3_base_prefix, single_suffix)
            for single_suffix in s3_suffix.split(",")
        ]
        s3_full_prefix = ",".join(expanded)
    else:
        s3_full_prefix = os.path.join(s3_base_prefix, s3_suffix)
    console_printer.print_info(
        f"Attempting to generate arguments for historic data load"
    )
    console_printer.print_info(
        f"Topics list is '{topics}', s3 base prefix is '{s3_full_prefix}', correlation id is '{correlation_id}', skip earlier than is '{skip_earlier_than}' and skip later than is '{skip_later_than}'"
    )
    topics_qualified = topics if topics.lower() != "all" else default_topic_list
    return f"{topics_qualified} {s3_full_prefix} {skip_earlier_than} {skip_later_than} {correlation_id}"
def generate_arguments_for_corporate_data_load(
    correlation_id,
    topics,
    s3_base_prefix,
    metadata_table_name,
    default_topic_list,
    file_pattern,
    skip_earlier_than,
    skip_later_than,
    partition_count,
    prefix_per_execution,
):
    """Build the space-separated argument string for a corporate data load run.

    Keyword arguments:
    correlation_id -- unique id for this test run
    topics -- comma delimited list of topics, or "ALL" to use the default list
    s3_base_prefix -- the s3 location in the bucket to load files from
    metadata_table_name -- the table for the metadata store writes
    default_topic_list -- comma delimited list used when topics is "ALL"
    file_pattern -- the file pattern for the input files (quoted in the output)
    skip_earlier_than -- `yyyy-MM-dd` start date, or None
    skip_later_than -- `yyyy-MM-dd` end date, or None
    partition_count -- number of partitions to split the run by, or None
    prefix_per_execution -- "true" to enable a prefix per execution, or None
    """
    console_printer.print_info(
        f"Attempting to generate arguments for corporate data load"
    )
    console_printer.print_info(
        f"Topics list is '{topics}', s3 base prefix is '{s3_base_prefix}', correlation id is '{correlation_id}',"
        f" file pattern is '{file_pattern}', metadata table name is '{metadata_table_name}',"
        f" prefix per execution setting is '{prefix_per_execution}',"
        f" start date is '{skip_earlier_than}' and end date is '{skip_later_than}'"
    )
    topics_qualified = topics if topics.lower() != "all" else default_topic_list

    def _or_not_set(value):
        # Optional CLI values are passed through as the literal "NOT_SET".
        return "NOT_SET" if value is None else value

    start_date = _or_not_set(skip_earlier_than)
    end_date = _or_not_set(skip_later_than)
    partitions = _or_not_set(partition_count)
    per_execution = _or_not_set(prefix_per_execution)
    return f'{topics_qualified} {s3_base_prefix} {metadata_table_name} {correlation_id} "{file_pattern}" {start_date} {end_date} {partitions} {per_execution}'
def generate_corporate_data_s3_prefix(base_prefix, database, collection, timestamp):
    """Build the S3 prefix a corporate data file is uploaded under.

    Layout: <base_prefix>/<yyyy>/<mm>/<dd>/<database>/<collection>, with the
    month and day zero-padded to two digits.

    Keyword arguments:
    base_prefix -- the base location for the corporate data
    database -- the database for this file
    collection -- the collection name for this file
    timestamp -- the timestamp for the file being sent to s3
    """
    year_part = str(timestamp.year)
    month_part = f"{timestamp.month:02d}"
    day_part = f"{timestamp.day:02d}"
    return os.path.join(base_prefix, year_part, month_part, day_part, database, collection)
| [
"noreply@github.com"
] | uk-gov-mirror.noreply@github.com |
a95fd9b252c6a9aef8b741ea3b6f2077d542ae9c | b83752ae43ed3e08b17fa6911c13885fec1ed0b5 | /InterviewQuestion/easy.py | d833d614535a8ca893a92d18dc82c1f46b04f4eb | [] | no_license | ephremworkeye/python-advanced | f25cf4f939d53f29a588722e482b6f2923a035f4 | 5ba305f221433b904323182ca18d85111d14bfd0 | refs/heads/master | 2023-07-31T00:11:35.288263 | 2021-09-23T10:59:44 | 2021-09-23T10:59:44 | 393,608,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | def majority_element_indexes(lst):
'''
Return a list of the indexes of the majority element.
Majority element is the element that appears more than
floor(n / 2) times.
If there is no majority element, return []
>>> majority_element_indexes([1, 1, 2])
[0, 1]
>>> majority_element_indexes([1, 2])
[]
>>> majority_element_indexes([])
[]
'''
# n = len(lst)//2
# [1, 1, 2] {1: 2, 2, 1 }
n = len(lst) // 2
count = 0
seen = {}
result = []
for index, val in enumerate(lst):
if val not in seen:
seen[val] = index
count += 1
else:
count += 1
if count > n:
res.append(seen[val])
res.append(index)
| [
"ephremworkeye@gmail.com"
] | ephremworkeye@gmail.com |
675c3f2d3fe7c16a47d1d6e4ed147df6f7fc4eb0 | 1ae3c8844ffd15bbb214a007b0b754431e4eb820 | /setup.py | 1b06a3e0fdf246f19753f59aeee5b5aa244266d1 | [
"BSD-3-Clause",
"BSD-2-Clause",
"Apache-2.0"
] | permissive | SpectralAngel/django-select2-forms | 37356482ea828e2316c3109c4966ace4f2e0c337 | a1e7b48ade3b0a6bfbb3dbf5ddb880634b56da08 | refs/heads/master | 2021-01-18T00:56:16.161479 | 2015-12-03T16:21:34 | 2015-12-03T16:21:34 | 17,144,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | #!/usr/bin/env python
try:
    from setuptools import setup, find_packages
except ImportError:
    # Bootstrap setuptools on ancient environments that lack it.
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

setup_kwargs = {}
try:
    # Use a context manager so the README handle is closed deterministically
    # (the previous open(...).read() leaked the file object).
    with open('README.rst') as readme:
        setup_kwargs['long_description'] = readme.read()
except IOError:
    # Use the create_readme_rst command to convert README to reStructuredText
    pass

setup(
    name='django-select2-forms',
    version='1.1.25',
    description='Django form fields using the Select2 jQuery plugin',
    author='Frankie Dintino',
    author_email='fdintino@theatlantic.com',
    url='https://github.com/theatlantic/django-select2-forms',
    packages=find_packages(),
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
    ],
    include_package_data=True,
    zip_safe=False,
    entry_points={
        'distutils.commands': [
            'create_readme_rst = select2.build:create_readme_rst',
        ],
    },
    **setup_kwargs)
| [
"fdintino@theatlantic.com"
] | fdintino@theatlantic.com |
c6854028c1c854f9823cc931e521b72d013b6fc3 | 68459c9f5ef5847079f373ee59c535327890bb2c | /PatientModule/admin.py | 9456e3d9da2db8c526a085624bfcc85d7f68a506 | [] | no_license | sanathsasi01/EcCovid | 4dc34a5c72ea363c0ff0d75624b389f6050775a1 | b21038d2265d5363f1069ca8ebdb555e19891dd1 | refs/heads/master | 2023-05-01T05:18:29.463143 | 2021-05-11T09:58:37 | 2021-05-11T09:58:37 | 365,087,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | from django.contrib import admin
from .models import *
# Register your models here.
# Expose the patient/bed models in the Django admin site.
admin.site.register(PatientDetails)
admin.site.register(Beds)
# admin.site.register(bedCount)
admin.site.register(FreeBeds)
| [
"sanathsasi01@gmail.com"
] | sanathsasi01@gmail.com |
c7e5725f4180ea2af34ce127356f310f29e6410c | 6fbaa8d9e06eec449a228b9767166d7026b917d4 | /Combat/data.py | 1f713ef39986e0286a2155d72e844969cc890736 | [] | no_license | BrokenShell/PW-DiscordBot | 86a3f30359476e248501af2f613ac013eb416ba8 | 97790a25afc071c00b70030e0a40e58d11824463 | refs/heads/master | 2022-12-09T13:40:51.465051 | 2020-09-05T23:03:53 | 2020-09-05T23:03:53 | 293,138,244 | 0 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | """ Data Model for Discord Bot """
from os import getenv
from typing import List
from pymongo import MongoClient
class DataModel:
    """Thin persistence layer over the MongoDB `discord_bot.characters` collection."""
    def _connect(self):
        """ MongoDB Table Connection """
        # Credentials and host come from the MONGODB_* environment variables.
        # NOTE(review): a new MongoClient is created on every call rather than
        # being reused — consider caching the client if call volume grows.
        db = MongoClient(
            f"mongodb+srv://{getenv('MONGODB_USER')}:{getenv('MONGODB_PASS')}"
            f"@{getenv('MONGODB_URI')}/test?retryWrites=true&w=majority"
        )
        return db.discord_bot.characters
    def push(self, character: dict):
        """Insert a new character document."""
        db = self._connect()
        db.insert_one(character)
    def load(self, query: dict) -> dict:
        """Return the first document matching `query` (or None)."""
        db = self._connect()
        return db.find_one(query)
    def find(self, query: dict) -> List[dict]:
        """Return all documents matching `query` as a list."""
        db = self._connect()
        return list(db.find(query))
    def update(self, query: dict, character: dict):
        """Replace the document matching `query`, inserting it if absent."""
        db = self._connect()
        db.replace_one(query, character, upsert=True)
# Manual smoke test: inserts a sample character into the live database.
if __name__ == '__main__':
    db = DataModel()
    db.push({
        'Name': 'Capt. Morgause',
        'Health': 40,
        'Weapon': 'Dagger',
        'Player': 'Broken',
    })
| [
"webmaster@sharpdesigndigital.com"
] | webmaster@sharpdesigndigital.com |
b841db80d9a13cf21d3d606d92aa945cf7d8a0f7 | 37dad1d64aa5f05311f9281e0c48af8cadb8e1b1 | /mylib/openpose.py | 4956bae606939fbafc47115484b8726ff50ad274 | [] | no_license | nainauy/Detect-Hand-Grasping-Object | 1008acc5a8fad5046f0b7aa28e9ce71f25e5d1ec | 5257f9881747a7d56b6015e77d66517abb215941 | refs/heads/master | 2021-05-19T12:56:52.831945 | 2019-04-19T21:54:10 | 2019-04-19T21:54:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,073 | py |
import numpy as np
import cv2
import sys, os, time, argparse, logging
import simplejson
import argparse
import math
CURR_PATH = os.path.dirname(os.path.abspath(__file__))+"/"
# Openpose ==============================================================
sys.path.append(CURR_PATH + "../tf-pose-estimation")
from tf_pose.networks import get_graph_path, model_wh
from tf_pose.estimator import TfPoseEstimator
from tf_pose import common
import tensorflow as tf
# Module logger: DEBUG-level messages to stderr with a timestamped format.
logger = logging.getLogger('TfPoseEstimator')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# Settings ===============================================================
DRAW_FPS = True  # overlay the fps counter in draw_fps()
MAX_FRACTION_OF_GPU = 0.3  # cap on TensorFlow's per-process GPU memory share
# Human pose detection ==============================================================
class SkeletonDetector(object):
# This func is mostly copied from https://github.com/ildoonet/tf-pose-estimation
    def __init__(self, model=None, image_size=None):
        """Load a tf-pose estimation model.

        model -- "cmu" or "mobilenet_thin"; defaults to "cmu", and any other
                 value falls back to "mobilenet_thin".
        image_size -- "WxH" network input size string (default "432x368").
        """
        if model is None:
            model = "cmu"
        if image_size is None:
            image_size = "432x368" # 7 fps
            # image_size = "336x288"
            # image_size = "304x240" # 14 fps
        models = set({"mobilenet_thin", "cmu"})
        self.model = model if model in models else "mobilenet_thin"
        # parser = argparse.ArgumentParser(description='tf-pose-estimation run')
        # parser.add_argument('--image', type=str, default='./images/p1.jpg')
        # parser.add_argument('--model', type=str, default='cmu', help='cmu / mobilenet_thin')
        # parser.add_argument('--resize', type=str, default='0x0',
        #     help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
        # parser.add_argument('--resize-out-ratio', type=float, default=4.0,
        #     help='if provided, resize heatmaps before they are post-processed. default=1.0')
        self.resize_out_ratio = 4.0  # heatmap upsample factor used at inference
        # args = parser.parse_args()
        w, h = model_wh(image_size)
        tf_config = tf.ConfigProto()
        # Limit this process's share of GPU memory (see MAX_FRACTION_OF_GPU).
        tf_config.gpu_options.per_process_gpu_memory_fraction=MAX_FRACTION_OF_GPU
        # tf_config = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)#https://stackoverflow.com/questions/34199233/how-to-prevent-tensorflow-from-allocating-the-totality-of-a-gpu-memory
        if w == 0 or h == 0:
            # Unparseable size string: fall back to the default network input.
            e = TfPoseEstimator(get_graph_path(self.model),
                target_size=(432, 368),tf_config=tf_config)
        else:
            e = TfPoseEstimator(get_graph_path(self.model), target_size=(w, h), tf_config=tf_config)
        # self.args = args
        self.w, self.h = w, h
        self.e = e
        self.fps_time = time.time()  # timestamp used by draw_fps()
        self.cnt_image = 0  # frames processed; first frame records image size
def detect(self, image):
self.cnt_image += 1
if self.cnt_image == 1:
self.image_h = image.shape[0]
self.image_w = image.shape[1]
self.scale_y = 1.0 * self.image_h / self.image_w
t = time.time()
# Inference
humans = self.e.inference(image, resize_to_default=(self.w > 0 and self.h > 0),
# upsample_size=self.args.resize_out_ratio)
upsample_size=self.resize_out_ratio)
# Print result and time cost
elapsed = time.time() - t
logger.info('inference image in %.4f seconds.' % (elapsed))
return humans
def draw(self, img_disp, humans):
img_disp = TfPoseEstimator.draw_humans(img_disp, humans, imgcopy=False)
def draw_fps(self, img_disp):
# logger.debug('show+')
if DRAW_FPS:
cv2.putText(img_disp,
# "Processing speed: {:.1f} fps".format( (1.0 / (time.time() - self.fps_time) )),
"fps = {:.1f}".format( (1.0 / (time.time() - self.fps_time) )),
(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1,
(0, 0, 255), 2)
self.fps_time = time.time()
def humans_to_skelsList(self, humans, scale_y = 1.0): # get (x, y * scale_y)
if scale_y is None:
scale_y = self.scale_y
skelsList = []
NaN = 0
for human in humans:
skeleton = [NaN]*(18*2)
for i, body_part in human.body_parts.items(): # iterate dict
idx = body_part.part_idx
skeleton[2*idx]=body_part.x
skeleton[2*idx+1]=body_part.y * scale_y
skelsList.append(skeleton)
return skelsList
def get_hands(self, humans):
skelsList = self.humans_to_skelsList(humans)
def predict_hand_position(skel, idx_wrist,
ratio = 1.0 # (wrist to hand)/(wrist to elbow)
):
idx_elbow = idx_wrist - 1
wx, wy = skel[idx_wrist*2], skel[idx_wrist*2+1]
ex, ey = skel[idx_elbow*2], skel[idx_elbow*2+1]
hx = wx + (wx - ex) * ratio
hy = wy + (wy - ey) * ratio
return [hx, hy]
NaN = 0
LEFT_WRIST = 4
RIGH_WRIST = 7
hands = []
for skeleton in skelsList:
if skeleton[LEFT_WRIST] != NaN:
hands.append(predict_hand_position(skeleton, LEFT_WRIST))
if skeleton[RIGH_WRIST] != NaN:
hands.append(predict_hand_position(skeleton, RIGH_WRIST))
return hands
def get_hands_in_xy(self, humans):
hands = self.get_hands(humans)
# Change coordinate to pixel
for i, hand in enumerate(hands):
x = int(hand[0]*self.image_w)
y = int(hand[1]*self.image_h)
hands[i] = [x, y]
return hands
@staticmethod
def get_ith_skeleton(skelsList, ith_skeleton=0):
res = np.array(skelsList[ith_skeleton])
return res | [
"felixchenfy@gmail.com"
] | felixchenfy@gmail.com |
9237414c6b0b1f7380a077644b65acb4e20307cd | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /important_man_or_early_way/hand_or_big_government/problem/see_part/own_group/time.py | 85ff8a4c722c7de152e5218f8e2db8d6cb4559b1 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py |
#! /usr/bin/env python
def large_fact_and_place(str_arg):
    """Forward *str_arg* to want_case_in_own_hand, then print 'few_place'."""
    want_case_in_own_hand(str_arg)
    print("few_place")
def want_case_in_own_hand(str_arg):
    """Write the given string to stdout, unchanged."""
    message = str_arg
    print(message)
# Script entry point: exercise the call chain with a sample argument.
if __name__ == '__main__':
    large_fact_and_place('right_time')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
a8520764cc51603c97d98b8dccdc4b3e2e4463ad | 84cf3908371c9d346a3798e682389eb1a2692a99 | /backend/raspy_field_26162/urls.py | b307e62e68651c559347d873b9262ca71548014b | [] | no_license | crowdbotics-apps/raspy-field-26162 | 85e9c88ee2c2a5ae2063264601eb684c08d406a5 | edbc9aa1198cad09eca869541e595454a9dc97dc | refs/heads/master | 2023-04-27T13:25:41.449954 | 2021-05-05T16:48:24 | 2021-05-05T16:48:24 | 364,645,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,229 | py | """raspy_field_26162 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# URL routing table.  Django matches patterns top to bottom, so ordering is
# significant; the SPA catch-all routes appended at the bottom must stay last.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding strings for the Django admin site.
admin.site.site_header = "Raspy Field"
admin.site.site_title = "Raspy Field Admin Portal"
admin.site.index_title = "Raspy Field Admin"
# swagger
# Interactive API documentation (drf-yasg); visible only to authenticated users.
api_info = openapi.Info(
    title="Raspy Field API",
    default_version="v1",
    description="API documentation for Raspy Field App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Serve the single-page app at the root URL and for any otherwise-unmatched path.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
03e7651977bd58571673a6fe4806e3d264bd8db3 | 792ae5d2a5c17af4f2ccfa582e3aeec569a6809a | /532. K-diff Pairs in an Array.py | 0bb439ae360ed565b3412197213c60206cf80f54 | [] | no_license | ADebut/Leetcode | 396b8b95ad5b5e623db2839bbfdec861c4c1731f | 7333d481e00e8c1bc5b827d1d4ccd6e4d291abd7 | refs/heads/master | 2020-07-05T18:48:27.504540 | 2019-10-28T10:51:43 | 2019-10-28T10:51:43 | 202,735,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | class Solution:
def findPairs(self, nums: List[int], k: int) -> int:
if (len(nums) < 2 or k < 0):
return 0
count = 0
nums.sort()
right = 0;
for i in range(len(nums)):
if (i > 0 and nums[i] == nums[i - 1]):
continue
right = max(right, i + 1)
while (right < len(nums)):
if (nums[right] - k == nums[i]):
count += 1
break
elif (nums[right] - k < nums[i]):
right += 1
else:
break
return count | [
"chen758@usc.edu"
] | chen758@usc.edu |
c4fd29805691979948d88f775ff088b7be085553 | 6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a | /azure-cognitiveservices-search-visualsearch/azure/cognitiveservices/search/visualsearch/models/image_knowledge_py3.py | 637dc7e91a512d1856c7ae5899c35b500bdbe13d | [
"MIT"
] | permissive | ashirey-msft/azure-sdk-for-python | d92381d11c48f194ec9f989f5f803db614fb73f2 | e04778e13306dad2e8fb044970215bad6296afb6 | refs/heads/master | 2020-03-23T06:05:39.283442 | 2018-09-15T00:18:26 | 2018-09-15T00:18:26 | 141,188,192 | 0 | 1 | MIT | 2018-07-16T20:02:52 | 2018-07-16T20:02:52 | null | UTF-8 | Python | false | false | 2,288 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .response import Response
class ImageKnowledge(Response):
    """Top-level response object for a Bing Visual Search request.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar read_link: The URL that returns this resource. To use the URL,
     append query parameters as appropriate and include the
     Ocp-Apim-Subscription-Key header.
    :vartype read_link: str
    :ivar web_search_url: The URL to Bing's search result for this item.
    :vartype web_search_url: str
    :ivar tags: A list of visual search tags.
    :vartype tags:
     list[~azure.cognitiveservices.search.visualsearch.models.ImageTag]
    :ivar image: Image object containing metadata about the requested image.
    :vartype image:
     ~azure.cognitiveservices.search.visualsearch.models.ImageObject
    """

    # Every field here is server-populated, hence read-only.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'read_link': {'readonly': True},
        'web_search_url': {'readonly': True},
        'tags': {'readonly': True},
        'image': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys and types.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'read_link': {'key': 'readLink', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '[ImageTag]'},
        'image': {'key': 'image', 'type': 'ImageObject'},
    }

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Server-populated fields start out empty on the client side.
        self.tags = None
        self.image = None
        # Discriminator constant identifying this response type.
        self._type = 'ImageKnowledge'
"noreply@github.com"
] | ashirey-msft.noreply@github.com |
1966a75d2154970b448f956360acf37fa4e7115b | 71ed291b47017982a38524b4ff8fe94aa947cc55 | /Array/LC414. Third Maximum Number.py | 64ff5c9fd66ddb6739767d005c10d680a4a605d5 | [] | no_license | pingting420/LeetCode_Algorithms | da83b77e8f37bd4f461b0a7e59c804871b6151e5 | f8786864796027cf4a7a8b0ad76e0b516cd99b54 | refs/heads/main | 2023-07-17T22:46:08.803128 | 2021-09-02T22:06:38 | 2021-09-02T22:06:38 | 375,401,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | #sort
#loc:should consider the situation that the number smaller than 3
def thirdMax(nums):
#set can help us delete the duplicte number
nums = list(set(nums))
nums.sort()
if len(nums)<3:
return nums[-1]
else:
return nums[-3]
| [
"bellapingting@gmial.com"
] | bellapingting@gmial.com |
2ed65ccdb34e6a493af9b66a4eaaa90eb364cb85 | 3f9e0e830a8472a37dafab95641eaed4c6dd1ac9 | /newpandas/root_to_txt.py | 5449bb22ee36be9af8e82ef5b9e33925b569175d | [] | no_license | robertej19/dvpip_ana | c475af97befa0eb5eb662f5f6c3333a5f44fcb71 | 486452ac0628dcde98b1cee329bbc23dc772aa26 | refs/heads/main | 2023-04-10T06:02:34.037258 | 2021-04-23T13:35:41 | 2021-04-23T13:35:41 | 335,967,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,211 | py | #!/usr/bin/python
import uproot
from icecream import ic
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import subprocess
import os
import time
import shutil
from shutil import copyfile
#This project
from src.utils import filestruct
from src.utils import query_maker
from src.utils import file_maker
"""
tree.keys()
['nmb', 'Pp', 'Ppx', 'Ppy', 'Ppz', 'Ptheta', 'Pphi',
'Pvx', 'Pvy', 'Pvz', 'Pvt', 'PSector', 'Pbeta', 'Pstat',
'nml', 'Ep', 'Epx', 'Epy', 'Epz', 'Etheta', 'Ephi', 'Evx',
'Evy', 'Evz', 'Evt', 'Ebeta', 'Estat', 'ESector', 'nmg',
'Gp', 'Gpx', 'Gpy', 'Gpz', 'Gtheta', 'Gphi', 'Gvx', 'Gvy',
'Gvz', 'Gvt', 'GSector', 'Gbeta', 'Gstat', 'beamQ',
'liveTime', 'startTime', 'RFTime', 'helicity',
'helicityRaw', 'EventNum', 'RunNum', 'Q2', 'Nu', 'q', 'qx',
'qy', 'qz', 'W2', 'xB', 't', 'combint', 'mPpx', 'mPpy',
'mPpz', 'mPp', 'mmP', 'meP', 'Mpx', 'Mpy', 'Mpz', 'Mp',
'mm', 'me', 'mGpx', 'mGpy', 'mGpz', 'mGp', 'mmG', 'meG',
'Pi0p', 'Pi0px', 'Pi0py', 'Pi0pz', 'Pi0theta',
'Pi0phi', 'Pi0M', 'Pi0Sector', 'pIndex', 'gIndex1',
'gIndex2', 'trento', 'trento2', 'trento3']
"""
def root_to_txt(data_dir, output_dir):
    """Dump selected branches of every ROOT file in *data_dir* to CSV text.

    For each ``<name>.root`` file in *data_dir*, a ``<name>.txt`` file is
    written to *output_dir* with one header row followed by one row per
    event: run, event, luminosity, helicity, Nu, q2, xb, t, phi.

    :param data_dir: directory containing the input ``.root`` files.
    :param output_dir: directory for the generated ``.txt`` files (created
        via file_maker.make_dir if needed).
    """
    data_list = os.listdir(data_dir)
    file_maker.make_dir(output_dir)
    total_counts = 0  # running total of events across all input files
    for file_num, filename in enumerate(data_list):
        print("on file {} out of {}, named {}".format(file_num + 1, len(data_list), filename))
        output_file_ending = filename.replace(".root", ".txt")
        file = uproot.open(data_dir + filename)
        tree = file["T"]
        q2 = tree["Q2"].array()
        xB = tree["xB"].array()
        t_mom = tree["t"].array()
        trent1 = tree["trento"].array()
        event_num = tree["EventNum"].array()
        run_num = tree['RunNum'].array()
        heli = tree["helicity"].array()
        lumi = tree['beamQ'].array()
        Nu = tree['Nu'].array()
        # BUG FIX: the output file was opened but never closed, leaking one
        # handle per input file; a context manager guarantees close/flush.
        with open(output_dir + output_file_ending, "w") as output_file:
            output_file.write("{},{},{},{},{},{},{},{},{}\n".format("run",
                "event", "luminosity", "helicity", "Nu", "q2", "xb", "t", "phi",
                ))
            for i in range(len(q2)):
                # NOTE(review): t and trento branches appear to be per-event
                # vectors; only element 0 is kept, matching the original
                # behaviour -- confirm this is the intended component.
                output_file.write("{},{},{},{},{},{},{},{},{}\n".format(run_num[i],
                    event_num[i], lumi[i], heli[i], Nu[i], q2[i],
                    xB[i], t_mom[i][0], trent1[i][0],)
                )
        print("done filtering")
        print("number of events is: {}".format(len(q2)))
        total_counts += len(q2)
    print("Done processing, total number of events is: {}".format(total_counts))
if __name__ == "__main__":
    # Resolve input/output directories from the project's file-structure
    # helper, then run the ROOT-to-text conversion end to end.
    fs = filestruct.fs()
    data_dir = fs.base_dir + fs.data_dir+fs.data_3_dir+fs.data_basename
    output_dir = fs.base_dir + fs.data_dir+fs.data_4_dir+fs.data_basename
    root_to_txt(data_dir,output_dir)
| [
"robertej@mit.edu"
] | robertej@mit.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.