blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6bbd6c656d8161a8b8be3d02844a8220aa9d5b9f | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /onos_id_bug_fixed_ids_file_blackbox_mcs2/interreplay_10_l_3/replay_config.py | 82595f379760b10766728d353bd972790e96556f | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,585 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow.replayer import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./start-onos.sh start', label='c1', address='192.168.56.11', cwd='/home/mininet/ONOS', controller_type='onos', kill_cmd='./start-onos.sh stop', restart_cmd='./start-onos.sh stop'), ControllerConfig(start_cmd='./start-onos.sh start', label='c2', address='192.168.56.12', cwd='/home/mininet/ONOS', controller_type='onos', kill_cmd='./start-onos.sh stop', restart_cmd='./start-onos.sh stop')],
topology_class=MeshTopology,
topology_params="num_switches=2",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
ignore_interposition=True,
kill_controllers_on_exit=False)
control_flow = Replayer(simulation_config, "experiments/onos_id_bug_fixed_ids_file_blackbox_mcs2/interreplay_10_l_3/events.trace",
input_logger=InputLogger(),
wait_on_deterministic_values=False,
allow_unexpected_messages=False,
delay_flow_mods=False,
default_dp_permit=False,
pass_through_whitelisted_messages=False,
invariant_check_name='check_for_file',
bug_signature="bug_file_detected")
| [
"a.hassany@gmail.com"
] | a.hassany@gmail.com |
522ca045013c70ddf30198f05b93ecc0ea09d608 | 33524b5c049f934ce27fbf046db95799ac003385 | /Дистанционная_подготовка/Программирование_на_python/11_Арифметика/zadacha_G.py | dab1de67ae08f8d442d48ab597d13da15fe422b7 | [] | no_license | mgbo/My_Exercise | 07b5f696d383b3b160262c5978ad645b46244b70 | 53fb175836717493e2c813ecb45c5d5e9d28dd23 | refs/heads/master | 2022-12-24T14:11:02.271443 | 2020-10-04T04:44:38 | 2020-10-04T04:44:38 | 291,413,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py |
'''
Сумма двух квадратов
'''
n = int(input())


def square(n):
    """Return n squared."""
    return n * n


# Print every pair (i, j) with i**2 + j**2 == n; report failure once if no
# such pair exists.  Fixes from the original: 'Imposible' was printed once
# per i even when pairs had already been found, and the hard-coded
# range(1, 20) silently produced no output for large n.
found = False
i = 1
while square(i) <= n:
    j = 1
    while square(i) + square(j) <= n:
        if square(i) + square(j) == n:
            print(i, j)
            found = True
        j += 1
    i += 1
if not found:
    print('Imposible')  # original (mis)spelling kept for identical output
| [
"mgbo433@gmail.com"
] | mgbo433@gmail.com |
cf3aef8d67017da08e3ac0f4469803969c4c23ce | 71c0121fb47df8ce11f33e7617dd262525ffea81 | /commandline/manager.py | 34727999ac0f2d6dedbde1132e0ac273ca1b4085 | [] | no_license | igor35hh/PythonTraining | 33d09b045b0f8676f23a5b43410aaa6a7c6a5631 | 020bc274bba0ffb70f1cdc45e18ea8b6467110fb | refs/heads/master | 2021-05-01T23:46:54.919344 | 2018-03-11T21:30:30 | 2018-03-11T21:30:30 | 77,922,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py |
from Person import Person


class Manager(Person):
    """A Person with a fixed job title whose raises carry an extra bonus."""

    def __init__(self, name, age, pay):
        # Delegate to Person, pinning the job string used by the base class.
        Person.__init__(self, name, age, pay, 'mahager')

    def giveRaise(self, percent, bonus=0.1):
        # A manager's raise is the requested percentage plus the bonus.
        Person.giveRaise(self, percent + bonus)


if __name__ == '__main__':
    # Quick self-test exercising both classes.
    tom = Manager(name='Tom Doe', age=50, pay=5000)
    print(tom.lastName())
    tom.giveRaise(.20)
    print(tom.pay)
    bob = Person(name='Bob Smith', age=42, pay=3000)
    sue = Person(name='Sue Jones', age=45, pay=4000)
    db = {bob, sue, tom}
    for obj in db:
        obj.giveRaise(.30)
    for obj in db:
        print(obj.lastName(), '=>', obj.pay)
        print(obj)
        for key in obj.__dict__:
            print(obj.__dict__[key])
        for val in obj.__dict__.values():
            print(val)
"igor35hh@gmail.com"
] | igor35hh@gmail.com |
37415d93e5786fda13e764919a9b90057f5fdbdc | db1dcf7cf7218e0de8eb7fa0da709effa507c3bf | /Mastering matplotlib/SF_MM/02-architecture/lib/modfind.py | dfc21e7e0c2b219c2e391cba3fd0d5ae28ad14d4 | [] | no_license | AaronCHH/B_PYTHON_matplotlib | a65da48771ce8248d51ee054eab91eeb3ce50e74 | 84c809800b3b797a09a5abfc76860ca7f7df8b80 | refs/heads/master | 2021-01-19T10:17:05.132372 | 2017-04-10T19:29:54 | 2017-04-10T19:29:54 | 87,849,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | from collections import Counter, OrderedDict
from modulefinder import Module, ModuleFinder
from typecheck import typecheck
import typecheck as tc
class CustomFinder(ModuleFinder):
    """ModuleFinder that records imports whose names match include/exclude
    prefix lists, counting how often each module is imported and which
    (caller, module) edges occur."""

    def __init__(self, include: list=None, exclude: list=None,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.debug = False
        # Prefix lists controlling which module names are tracked.
        self.cf_include = include or ["matpl", "mpl"]
        self.cf_exclude = exclude or ["matplotlib._", "ft2font", "ttconv"]
        self.cf_imports = OrderedDict()   # (caller, module) -> 1
        self.cf_weights = Counter()       # module -> import count

    @typecheck
    def matches(self, name: str) -> bool:
        """True when name starts with an include prefix and no exclude prefix."""
        wanted = any(name.startswith(prefix) for prefix in self.cf_include)
        banned = any(name.startswith(prefix) for prefix in self.cf_exclude)
        return wanted and not banned

    @typecheck
    def import_hook(self, name: str, caller: tc.optional(Module)=None,
                    fromlist: tc.optional(list)=None,
                    level: int=-1) -> tc.optional(Module):
        """Record matching imports, then defer to ModuleFinder."""
        if self.matches(name) and caller:
            if self.debug:
                print(caller.__name__, " -> ", name)
            self.cf_weights[name] += 1
            self.cf_imports[(caller.__name__, name)] = 1
        super().import_hook(name, caller, fromlist, level)
__all__ = ["CustomFinder"]
del Module, ModuleFinder, typecheck, tc
| [
"aaronhsu219@gmail.com"
] | aaronhsu219@gmail.com |
41f0e0207d7368fe936b6b7dd33c3d5918255fc7 | 744594f30c5e283f6252909fc68102dd7bc61091 | /2017/13/13b_solution.py | de65bf6b0591cf4c57cbfdbf892acb60bc33e3ad | [
"MIT"
] | permissive | vScourge/Advent_of_Code | 84f40c76e5dc13977876eea6dbea7d05637de686 | 36e4f428129502ddc93c3f8ba7950aed0a7314bb | refs/heads/master | 2022-12-20T22:12:28.646102 | 2022-12-15T22:16:28 | 2022-12-15T22:16:28 | 160,765,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,671 | py | """
Advent of Code 2017
input is: input.txt
"""
DIR_DOWN = 0
DIR_UP = 1


class Layer:
    """One firewall layer: a scanner bouncing across ``range`` positions."""

    def __init__(self, depth, range):
        # ``range`` shadows the builtin; name kept for caller compatibility.
        self.depth = depth
        self.range = range
        self.scan_pos = 0
        self.scan_dir = DIR_DOWN

    def move(self):
        """Advance the scanner one step, reversing direction at either end."""
        if self.scan_dir == DIR_DOWN:
            self.scan_pos += 1
            if self.scan_pos == self.range - 1:
                self.scan_dir = DIR_UP
        else:
            self.scan_pos -= 1
            if self.scan_pos == 0:
                self.scan_dir = DIR_DOWN

    def __repr__(self):
        return '<Layer {0}>'.format(self.depth)


def delay_then_run(layers, delay, max_depth=None):
    """Simulate crossing the firewall after waiting ``delay`` picoseconds.

    layers    -- dict mapping depth -> Layer
    delay     -- picoseconds to wait before entering layer 0
    max_depth -- deepest layer; defaults to max(layers).  (Bug fix: this was
                 previously read from a module global set only by __main__.)

    Returns True when the packet crosses without being caught; a packet is
    caught when it enters a layer whose scanner is at position 0.
    """
    if max_depth is None:
        max_depth = max(layers)
    pos = -1
    count = 0
    # Reset all scanners so repeated calls start from a clean state.
    for layer in layers.values():
        layer.scan_pos = 0
        layer.scan_dir = DIR_DOWN
    while pos <= max_depth:
        if count >= delay:
            pos += 1
            if pos in layers and layers[pos].scan_pos == 0:
                print('delay {0}, caught on layer {1}'.format(delay, pos))
                return False
        # Move every scanner one step per picosecond.
        for layer in layers.values():
            layer.move()
        count += 1
    return True
if __name__ == '__main__':
    layers = {}
    max_depth = 0
    # Build the firewall description: one "depth: range" pair per line.
    for line in open('input.txt', 'r'):
        fields = line.split(':')
        depth = int(fields[0])
        rng = int(fields[1].strip())
        layers[depth] = Layer(depth, rng)
        max_depth = max(max_depth, depth)
    # Try ever longer delays until the packet crosses uncaught.
    delay = 0
    while not delay_then_run(layers, delay):
        delay += 1
    print('delay =', delay)
    print('done')
"adam.pletcher@gmail.com"
] | adam.pletcher@gmail.com |
e9cab789778a120d62afba5b2378b2ea14e862b2 | 0af18096e2d024cc1e069b16e7c17d944c6f96e8 | /backend/naiosh_4776/wsgi.py | 70f7738558b87649acf9f02b276b15f0b7d28243 | [] | no_license | crowdbotics-apps/naiosh-4776 | 7ed8ee5c42efa74da842a36d55fb5acb68e68291 | 27386cc9d311d965e87b086ff10b44f469dd2a96 | refs/heads/master | 2022-12-12T03:43:11.995902 | 2019-06-17T10:09:09 | 2019-06-17T10:09:09 | 192,322,848 | 0 | 0 | null | 2022-12-03T14:01:00 | 2019-06-17T10:09:03 | JavaScript | UTF-8 | Python | false | false | 400 | py | """
WSGI config for naiosh_4776 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "naiosh_4776.settings")
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
7b0cb2d2f3dee1ab0cc39a99d8091729a0a93698 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_3/ndxkee009/question4.py | 5473e314c72caf0b2087d06f5bbfde1e8a2e5a5d | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | #Keegan Naidoo
#NDXKEE009
import math

# NOTE(review): eval(input()) executes arbitrary expressions; kept so the
# prompts and accepted input stay identical, but int(input()) would be safer.
s = eval(input("Enter the starting point N: \n"))
e = eval(input("Enter the ending point M: \n"))


def _is_prime(num):
    """True when num is prime.  Fixes the original trial-division test,
    which accepted 0 and 1 (empty all(...) over range(2, 1))."""
    if num < 2:
        return False
    return all(num % x != 0 for x in range(2, int(math.sqrt(num)) + 1))


# Print every palindromic prime strictly inside (s, e).
print("The palindromic primes are:")
for i in range(s + 1, e):
    text = str(i)
    if _is_prime(i) and text == text[::-1]:
        print(text)
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
8a3ae0b145000c2ff07323b20a3e821a08a00604 | b500996a0b29829fde6afe8b23178ca9df4a239d | /rydinfap/src/procdata/procxml.py | 85d8990856b92ff7752484b6827873e33fa16a25 | [] | no_license | eocampo2000/test-code | 48c4d444e323eef5e6fe7e61b018952ef3cd4134 | 49328664243e1a9daf9c567d1aaaa19fd4654c02 | refs/heads/master | 2016-08-11T07:35:31.346464 | 2016-02-13T12:33:55 | 2016-02-13T12:33:55 | 51,642,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,011 | py | '''
Created on May 15, 2012
@author: eocampo
XML files parser. Please specify the main LAYOUT.
TODO encapsulate it into a class
EO 20150313 : Added parseCanCPI, _parseCanCPITable methods.
'''
__version__ = '20150313'
import utils.fileutils as fu
import xml.etree.ElementTree as ET
import sys
import string as ST
from bs4 import BeautifulSoup
# Use for informatica schedules
#<IS_SCHED>
# <FOLDER>
# <WF sched="1">wkf_name</WF>
# <WF sched="1">wkf_name</WF>
# </FOLDER>
#</IS_SCHED>
# Workflows collected by _parseFolder as 'folder.workflow' strings.
sched = []
unsched = []

# Parses the children of a <folder> node: <wkf sched="1">wkf_name</wkf>.
# Scheduled workflows go to `sched`, the rest to `unsched`.
def _parseFolder(fld, node):
    for elem in node:
        if elem.tag == 'wkf':
            sc = elem.get('sched')
            # str.strip() replaces the Python2-only string.strip() helper;
            # identical behaviour, and also valid on Python 3.
            wf = elem.text.strip()
            if sc == '1':
                sched.append('%s.%s' % (fld, wf))
            else:
                unsched.append('%s.%s' % (fld, wf))
# Parse an IS_SCHED schedule file.  Returns (rc, sched, unsched) where rc is
# 0 on success, 1 when the file cannot be opened, 2 for an empty tree, 3 for
# a wrong root tag and 4 for an XML parse error.
def parseSched(fn):
    f = fu.openFile(fn, 'r')
    if f is None:
        return (1, [], [])
    try:
        tree = ET.parse(f)
        # Empty tree
        if tree == '':
            return (2, [], [])
        rt = tree.getroot()
        # 1- Check TREE root
        if ST.upper(rt.tag) != 'IS_SCHED':
            return (3, [], [])
        for elem in tree.getiterator():
            for e in elem:
                if ST.lower(e.tag) == 'folder':
                    _parseFolder(e.get('name'), e.getchildren())
    except ET.ParseError:
        print ("Error %s \t %s " % (sys.exc_type, sys.exc_value))
        return (4, [], [])
    # BUG FIX: the original ended with ``finally: return 0, sched, unsched``.
    # A return inside ``finally`` overrides every return above, so callers
    # always saw rc 0 even on errors.  Return success only on the clean path.
    return 0, sched, unsched
# Build 'CAN<sep>col1<sep>col2\n' records from the rows of an HTML table.
def _parseCanCPITable(rows, sep='\t'):
    records = []
    for row in rows:
        cells = row.findChildren('td')
        # Skip rows that do not have at least three data cells.
        if len(cells) < 3:
            continue
        record = "CAN"
        for idx, cell in enumerate(cells, 1):
            value = cell.string.replace("-", sep)
            if idx < 3:
                # First two cells contribute columns to the record.
                record = record + sep + value
            else:
                # Third cell terminates the record; remaining cells ignored.
                records.append(record + '\n')
                break
    return records
def parseCanCPI(fn):
    """Read the Canadian CPI HTML page `fn` and return its table rows as
    tab-separated 'CAN...' lines (see _parseCanCPITable)."""
    data = fu.readFile(fn)
    # BUG FIX: the original used ``data is ''`` — an identity comparison with
    # a literal, which is implementation-dependent; use equality instead.
    if data is None or data == '':
        return []
    soup = BeautifulSoup(data)
    table = soup.find("table", { "class" : "table table-bordered table-striped table-hover cpi" })
    rows = table.findChildren(['tr'])
    return _parseCanCPITable(rows)
def test_schd():
fn = r'C:\infa_support\schedules\sched.xml'
rc,s,u = parseSched(fn)
#parseSched(fn)
print "rc = ", rc, "\tsched = ", s , "\tunsched ", u
def test_cpi():
fn = 'C:\\apps\\cpi_data_us.html'
d = parseCanCPI(fn)
#parseSched(fn)
print "len(d ) = ", len(d), "data= ", d
if __name__ == "__main__":
#test_schd()
test_cpi() | [
"eocampo1000@hotmail.com"
] | eocampo1000@hotmail.com |
bb2a5c272cb0c24dd423ee814a9a892c61bfe4e2 | 1c40857067b4d92e4efe161d154927c928548a7d | /workery/tenant_api/views/staff_comment.py | 168d92c62878ef8fe2ef89a2cc6ead80bac6836b | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rahulyhg/workery-django | 9331eb1b87b02407a721e9d512e3d60cb22af314 | 73fd500fefcf4212cf07071e16cf676e173ddea6 | refs/heads/master | 2020-06-29T08:34:03.529503 | 2019-08-03T21:18:09 | 2019-08-03T21:18:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | # -*- coding: utf-8 -*-
from ipware import get_client_ip
from django_filters.rest_framework import DjangoFilterBackend
from django.conf.urls import url, include
from django.shortcuts import get_list_or_404, get_object_or_404
from rest_framework import filters
from rest_framework import generics
from rest_framework import authentication, viewsets, permissions, status
from rest_framework.response import Response
from shared_foundation.custom.drf.permissions import IsAuthenticatedAndIsActivePermission
from tenant_api.pagination import TinyResultsSetPagination
from tenant_api.permissions.staff import (
CanListCreateStaffPermission,
CanRetrieveUpdateDestroyStaffPermission
)
from tenant_api.serializers.staff_comment import (
StaffCommentListCreateSerializer,
)
from tenant_foundation.models import Staff
class StaffCommentListCreateAPIView(generics.ListCreateAPIView):
    """List staff (newest first) and create staff comments for the tenant,
    recording who posted the comment and from which address."""

    serializer_class = StaffCommentListCreateSerializer
    pagination_class = TinyResultsSetPagination
    permission_classes = (
        permissions.IsAuthenticated,
        IsAuthenticatedAndIsActivePermission,
        CanListCreateStaffPermission
    )

    def get_queryset(self):
        """List: every staff record, most recently created first."""
        return Staff.objects.all().order_by('-created')

    def post(self, request, format=None):
        """Create: validate and save a comment, stamping author/origin."""
        client_ip, is_routable = get_client_ip(self.request)
        context = {
            'created_by': request.user,
            'created_from': client_ip,
            'created_from_is_public': is_routable,
            'franchise': request.tenant,
        }
        serializer = StaffCommentListCreateSerializer(data=request.data, context=context)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
| [
"bart@mikasoftware.com"
] | bart@mikasoftware.com |
144328a1ef2d6268d21d76ba311a8ab52315f9e7 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/fa45c44026ed471714d0383fd2731911d16a1271-<main>-fix.py | c47982491f786ea4edc3a9e39524947a6f75ebe6 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | def main():
args = parse_args()
try:
sys.stdout = StringIO()
config_files = (cloud_config.CONFIG_FILES + CONFIG_FILES)
sdk.enable_logging(debug=args.debug)
inventory_args = dict(refresh=args.refresh, config_files=config_files, private=args.private, cloud=args.cloud)
if hasattr(sdk_inventory.OpenStackInventory, 'extra_config'):
inventory_args.update(dict(config_key='ansible', config_defaults={
'use_hostnames': False,
'expand_hostvars': True,
'fail_on_errors': True,
}))
inventory = sdk_inventory.OpenStackInventory(**inventory_args)
sys.stdout = sys.__stdout__
if args.list:
output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud)
elif args.host:
output = to_json(inventory.get_host(args.host))
print(output)
except sdk.exceptions.OpenStackCloudException as e:
sys.stderr.write(('%s\n' % e.message))
sys.exit(1)
sys.exit(0) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
20e2fa8b1ace352e52ea0894f8e32dc5c436ddab | 9cf6b31876b6fe3652e9d2613afff41793cc7d49 | /apps/cpa/forms.py | aab6be6e553718cf8eca57f5b85acd26ba63747a | [] | no_license | amyard/findinshopGit | 2a3b56c845691573b33ccc66b036cab0effa0e8e | b642bc81cf633c95ccd978d5e9fb4177eee38be4 | refs/heads/master | 2022-12-13T19:17:45.031095 | 2019-10-31T13:08:51 | 2019-10-31T13:08:51 | 213,323,148 | 0 | 0 | null | 2022-12-10T04:48:01 | 2019-10-07T07:44:13 | Roff | UTF-8 | Python | false | false | 3,269 | py | # -*- coding: utf-8 -*-
#Python imports
from datetime import timedelta
#Django imports
from django import forms
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.core.cache import cache
#Findinshop
from apps.cpa.models import CostSetting, OwnAndUserCategory
from apps.cpa.validators import MIN_COST_RATE
from apps.cpa.utils import float_to_python
from apps.section.models import Section
from apps.catalog.models import Category
from apps.website.models import Website
class CategoryCostForm(forms.ModelForm):
    """Form for setting the cost rate of a top-level section.

    NOTE(review): ``self.user`` is expected to be attached by the caller
    before clean() runs — confirm against the view using this form.
    """

    class Meta:
        model = CostSetting
        fields = ('section', 'current_rate')

    def __init__(self, *args, **kwargs):
        super(CategoryCostForm, self).__init__(*args, **kwargs)
        # Only parent sections can carry a rate.
        self.fields['section'].queryset = Section.parents.all()
        self.fields['current_rate'].help_text = 'Минимальная стоимость %s грн.' % MIN_COST_RATE
        self.fields['current_rate'].to_python = float_to_python

    def clean(self):
        cleaned_data = super(CategoryCostForm, self).clean()
        section = cleaned_data.get('section')
        if section and self.user:
            setting, created = CostSetting.objects.get_or_create(user=self.user, section=section)
            # Reject a no-op rate change.
            if setting.current_rate == cleaned_data.get('current_rate'):
                raise forms.ValidationError(u'Такая ставка уже установлена.')
            # Rate changes are throttled to one per three hours.
            if setting.changed is True:
                three_hours_ago = timezone.now() - timedelta(hours=3)
                if three_hours_ago < setting.date_change:
                    raise forms.ValidationError(u'Повторное изменение ставки на эту категорию возможно через 3 часа')
        return cleaned_data
class OwnAndUserCategoryForm(forms.ModelForm):
    """Admin form mapping our sections to a site's categories."""

    class Meta:
        model = OwnAndUserCategory
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super(OwnAndUserCategoryForm, self).__init__(*args, **kwargs)
        self.fields['our_section'].queryset = Section.parents.all()
        if self.instance.pk:
            # Existing record: restrict categories to the site's catalog.
            self.fields['categories'].queryset = Category.objects.filter(catalog=self.instance.site.catalog)
        else:
            # NOTE(review): assigns an (empty) queryset to ``choices`` here,
            # unlike the queryset assignment above — looks intentional to
            # blank the widget, but confirm.
            self.fields['categories'].choices = Category.objects.none()
        self.fields['site'].queryset = Website.objects.order_by('subdomain')
class ReportClickForm(forms.Form):
    """Date-range filter used by the click report."""

    date_from = forms.DateField(label=u'Начиная с даты')
    date_to = forms.DateField(label=u'Заканчивая датой')
| [
"maksymturenko@gmail.com"
] | maksymturenko@gmail.com |
ae516920a922dc54aabf018db891dd22f8e371f0 | 34cb685d3340cb59c2f3639b3b5ca42ff3812338 | /pptx/shapes/__init__.py | 99f249ecc8e51d468e8c571d0f1aa09118457441 | [
"MIT"
] | permissive | handwriter/python-pptx | 6b435b6c9c95fcc00cd2aa0923ca15e211228a8b | 22351c6f9fe637cadddca3461c4899af7d439711 | refs/heads/master | 2021-04-05T00:27:20.870352 | 2020-03-19T13:20:28 | 2020-03-19T13:20:28 | 248,506,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # encoding: utf-8
"""
Objects used across sub-package
"""
class Subshape(object):
    """
    Base class for drawing elements that sit below a shape in the XML tree
    but occasionally need an ancestor-provided service (such as adding or
    dropping a relationship).  Stores the ancestor in ``self._parent``.
    """

    def __init__(self, parent):
        super(Subshape, self).__init__()
        self._parent = parent

    @property
    def part(self):
        """
        The package part containing this object, delegated to the parent.
        """
        return self._parent.part
| [
"62296664+handwriter@users.noreply.github.com"
] | 62296664+handwriter@users.noreply.github.com |
ceaa8c09f078a8c7e8371072a72da1225db7cfa5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_gigolos.py | ef7c72c0482e98e0450b0486f04f9aea27e380da | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _GIGOLOS():
def __init__(self,):
self.name = "GIGOLOS"
self.definitions = gigolo
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['gigolo']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
5748810f922be1440a1298e49a103ac06d55a06e | a5747577f1f4b38823f138ec0fbb34a0380cd673 | /17/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisM4000_R_0-6.py | 910fd4a43e6bc2485a23bcac0657f0fb8a30f1a0 | [] | no_license | xdlyu/fullRunII_ntuple | 346fc1da4cec9da4c404aa1ec0bfdaece6df1526 | aa00ca4ce15ae050c3096d7af779de44fc59141e | refs/heads/master | 2020-08-03T07:52:29.544528 | 2020-01-22T14:18:12 | 2020-01-22T14:18:12 | 211,673,739 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,282 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration for the M4000, R=0-6 signal sample.
name = 'WWW'
steam_dir = 'xulyu'
config = Configuration()
# --- General: request bookkeeping ---
config.section_("General")
config.General.requestName = 'M4000_R0-6_off'
config.General.transferLogs = True
# --- JobType: CMSSW analysis job and its auxiliary inputs (JEC text files) ---
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Fall17_17Nov2017_V8_MC_L1FastJet_AK4PFchs.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK4PFchs.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK4PFchs.txt','Fall17_17Nov2017_V8_MC_L1FastJet_AK8PFchs.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK8PFchs.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK8PFchs.txt','Fall17_17Nov2017_V8_MC_L1FastJet_AK8PFPuppi.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK8PFPuppi.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK8PFPuppi.txt','Fall17_17Nov2017_V8_MC_L1FastJet_AK4PFPuppi.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK4PFPuppi.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK4PFPuppi.txt','L1PrefiringMaps_new.root']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis_sig.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
# --- Data: input dataset and splitting ---
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WkkToWRadionToWWW_M4000-R0-6_TuneCP5_13TeV-madgraph/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v4/MINIAODSIM'
#config.Data.inputDBS = 'global'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
config.Data.publication = False
#config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'M4000_R0-6_off'
# --- Site: output destination ---
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"XXX@cern.ch"
] | XXX@cern.ch |
08c5e949442ecbfec52341e878dee28f49e30e0f | 6fb8892e0455043b4776e331f8176ab1139f1fd9 | /backend/home/migrations/0001_load_initial_data.py | c1927313a664726ec8d191be309891da27b452b7 | [] | no_license | crowdbotics-apps/gozle-browser-28537 | c5bc1dc800c0b9836f3cd067d1d421fdb447def2 | fd0ee4d32ca575df2422506894f9380d11b76533 | refs/heads/master | 2023-06-07T04:50:52.183276 | 2021-07-07T15:26:51 | 2021-07-07T15:26:51 | 383,841,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | from django.db import migrations
def create_site(apps, schema_editor):
    """Point the default Site (pk=1) at this app's custom domain."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "gozle-browser-28537.botics.co"
    params = {"name": "Gozle browser"}
    if custom_domain:
        params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=params, id=1)


class Migration(migrations.Migration):

    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_site),
    ]
"team@crowdbotics.com"
] | team@crowdbotics.com |
60e0a38ac1246955d613e358f11c89163d652506 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_54/686.py | 4511b8d1827a6633638de3b13f1f24900abf7196 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
def gcd(a, b):
    """Greatest common divisor via the Euclidean algorithm (iterative
    form of the original recursion; gcd(a, 0) == a)."""
    while b != 0:
        a, b = b, a % b
    return a
f = open("input.txt", "r")
T = int(f.readline())
for case in range(T):
    values = [int(tok) for tok in f.readline().split(" ")]
    # Differences between consecutive entries (values[0] is the count).
    gaps = sorted(abs(values[k + 1] - values[k]) for k in range(1, len(values) - 1))
    g = 0
    for gap in gaps:
        g = gcd(g, gap)
    if g == 1:
        print("Case #", case + 1, ": ", 0, sep="")
    else:
        print("Case #", case + 1, ": ", (g - (values[1] % g)) % g, sep="")
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
bc9c3ebea8a6a5642bd542c11bcc86389e7463ea | f3d38d0e1d50234ce5f17948361a50090ea8cddf | /백준/Bronze/Bronze 4/2530번 ; 인공지능 시계 아직 안품.py | 01ea7bdd693da5cd63b5c982f15fcf886091b6d1 | [] | no_license | bright-night-sky/algorithm_study | 967c512040c183d56c5cd923912a5e8f1c584546 | 8fd46644129e92137a62db657187b9b707d06985 | refs/heads/main | 2023-08-01T10:27:33.857897 | 2021-10-04T14:36:21 | 2021-10-04T14:36:21 | 323,322,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | # https://www.acmicpc.net/problem/2530
# 현재 시각 시 A, 분 B, 초 C 입력
# 0 <= A <= 23
# 0 <= B <= 59
# 0 <= C <= 59
A, B, C = map(int, input().split(' '))
# 요리하는데 필요한 시간 D : 초 단위 입력
# 0 <= D <= 500,000
D = int(input())
# 요리하는 데 필요한 시간은 초 단위로 입력 받으므로 이것을 시, 분, 초 단위로 쪼개준다.
plus_hour = D // 3600
D = D - plus_hour * 3600
plus_minute = D // 60
plus_second = D % 60
# 결과를 표시할 시, 분, 초
result_hour = A + plus_hour
result_minute = B + plus_minute
result_second = C + plus_second
# 요리하는 데 필요한 시간을 더해주고 난 뒤 초가 60초를 넘은 경우
if result_second >= 60:
# 결과의 분에 1분을 더해주고
result_minute += 1
# 결과의 초에는 60초를 빼준다.
result_second -= 60
# 요리하는 데 필요한 시간을 더해주고 난 뒤 분이 60분을 넘은 경우
if result_minute >= 60:
# 결과의 시에 1시간을 더해주고
result_hour += 1
# 결과의 분에는 60분을 빼준다.
result_minute -= 60
# 요리하는 데 필요한 시간을 더해주고 난 뒤 시가 24시를 넘은 경우
if result_hour >= 24:
# 23시 뒤에는 0시가 되므로 결과의 시에 24를 빼준다.
result_hour -= 24
# 결과 출력
print(result_hour, result_minute, result_second)
| [
"bright_night_sky@naver.com"
] | bright_night_sky@naver.com |
929aec9c56cc8ca4c570af9cd6043c4d9256e2da | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /smallestRepunitDivByK.py | 3071d89089c335e92fa7eb56b3efbec7c1ac437c | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | class Solution:
def smallestRepunitDivByK(self, k: int) -> int:
    """Length of the smallest repunit (1, 11, 111, ...) divisible by k,
    or -1 when none exists.

    Only the running remainder mod k is tracked; by the pigeonhole
    principle, a repeated non-zero remainder means no repunit will ever
    be divisible by k.
    """
    rem = 1 % k
    seen = {rem}
    length = 1
    while rem != 0:
        rem = (rem * 10 + 1) % k
        length += 1
        if rem in seen:
            return -1
        seen.add(rem)
    return length
# Ad-hoc manual check: successive rebindings mean only the last k takes effect.
k = 1
k = 2
k = 3
# For k = 3 this prints 3 (111 is divisible by 3).
print(Solution().smallestRepunitDivByK(k))
| [
"zzz136454872@163.com"
] | zzz136454872@163.com |
fe9981ffe57faaf1b5ac9e27c981810646f19b43 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_odieatla_sheep.py | 8de3f71090aece5636ed89940641042f89fa0783 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 743 | py | #!/usr/bin/env python
t = int(raw_input()) # read a line with a single integer
for i in xrange(1, t + 1):
#n = [int(s) for s in raw_input().split(" ")]
n = int(raw_input())
limit = 100
result = range(0, 10)
m = n
to_break = False
for l in xrange(1, limit+1):
num_str = str(n*l)
#print "current number is {}".format(num_str)
for c in num_str:
#print "digit is {}".format(int(c))
#print "leftover digits are {}".format(result)
if int(c) in result:
result.remove(int(c))
if len(result) is 0:
m = num_str
to_break = True
break
if to_break:
break
if not to_break:
m = "INSOMNIA"
print "Case #{}: {}".format(i, m)
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
f2f1dabe7a5b575b5074614f8cad0c656ff69e8b | 982194a86c5a11185bb55a653ba1730807b9f67f | /source-builder/sb/log.py | 8e4602311574c32458949df50ebd0837b23593a3 | [] | no_license | jeffmurphy/rtems-source-builder | f476ef158d3a4103168e70731bbad6d5ea521d04 | 539c48a40543193fc088fd8a67b3ab0ef5308c24 | refs/heads/master | 2020-12-25T13:23:57.017108 | 2015-02-09T22:37:43 | 2015-02-09T22:37:43 | 30,490,786 | 0 | 0 | null | 2015-02-08T12:48:18 | 2015-02-08T12:48:17 | null | UTF-8 | Python | false | false | 6,078 | py | #
# RTEMS Tools Project (http://www.rtems.org/)
# Copyright 2010-2012 Chris Johns (chrisj@rtems.org)
# All rights reserved.
#
# This file is part of the RTEMS Tools package in 'rtems-testing'.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Log output to stdout and/or a file.
#
import os
import sys
import error
#
# A global log.
#
default = None

#
# Global parameters.
#
tracing = False
quiet = False

def set_default_once(log):
    """Install *log* as the module-wide default log, unless one is already set.

    Bug fix: the original assigned ``default`` without declaring it ``global``,
    which made ``default`` a local variable and raised ``UnboundLocalError``
    on the preceding ``is None`` read.
    """
    global default
    if default is None:
        default = log
def _output(text = os.linesep, log = None):
    """Output the text to a log if provided else send it to stdout.

    *text* may be None (treated as a newline), a list of lines, or a string.
    Falls back to the module-level default log, then to plain stdout.
    Python 2 only: uses the print statement.
    """
    if text is None:
        text = os.linesep
    # Normalise a list of lines into one newline-joined string.
    if type(text) is list:
        _text = ''
        for l in text:
            _text += l + os.linesep
        text = _text
    if log:
        log.output(text)
    elif default is not None:
        default.output(text)
    else:
        # Strip carriage returns so CRLF input prints as plain lines.
        for l in text.replace(chr(13), '').splitlines():
            print l
def stdout_raw(text = os.linesep):
    """Write *text* directly to stdout without a trailing newline and flush.

    Bypasses the log objects entirely (Python 2 trailing-comma print).
    """
    print text,
    sys.stdout.flush()
def stderr(text = os.linesep, log = None):
    """Print each line of *text* to sys.stderr.

    NOTE(review): the *log* parameter is accepted for signature symmetry with
    the other helpers but is unused here.
    """
    for l in text.replace(chr(13), '').splitlines():
        print >> sys.stderr, l
def output(text = os.linesep, log = None):
    """Forward *text* to _output() unless global quiet mode is enabled."""
    if quiet:
        return
    _output(text, log)
def notice(text = os.linesep, log = None):
    """Emit *text* as a notice.

    When not quiet and the default log does not already write to stdout, the
    lines are additionally printed to stdout so the user sees them.
    NOTE(review): _output() is called unconditionally, i.e. notices still go
    to the log even in quiet mode -- confirm this is intentional.
    """
    if not quiet and default is not None and not default.has_stdout():
        for l in text.replace(chr(13), '').splitlines():
            print l
    _output(text, log)
def trace(text = os.linesep, log = None):
    """Forward *text* to _output() only when global tracing is enabled."""
    if not tracing:
        return
    _output(text, log)
def warning(text = os.linesep, log = None):
    """Emit every line of *text* as a 'warning:'-prefixed notice."""
    lines = text.replace(chr(13), '').splitlines()
    for line in lines:
        notice('warning: %s' % (line), log)
def flush(log = None):
    """Flush *log* if given (and truthy), otherwise flush the module default log."""
    target = log if log else default
    if target is not None:
        target.flush()
def tail(log = None):
    """Return the tail buffer of *log*, else of the default log, else a stub string."""
    source = default if log is None else log
    if source is None:
        return 'No log output'
    return source.tail
class log:
    """Log output to stdout or a file.

    Fan-out sink: fhs[0] is stdout (or None), fhs[1] is stderr (or None),
    any further entries are open file handles.  A bounded tail buffer of the
    most recent lines is kept for post-mortem reporting.
    Python 2 only (uses ``file()`` and ``except IOError, ioe`` syntax).
    """
    def __init__(self, streams = None, tail_size = 200):
        # tail: most recent output lines, capped at tail_size entries.
        self.tail = []
        self.tail_size = tail_size
        self.fhs = [None, None]
        if streams:
            for s in streams:
                if s == 'stdout':
                    self.fhs[0] = sys.stdout
                elif s == 'stderr':
                    self.fhs[1] = sys.stderr
                else:
                    try:
                        self.fhs.append(file(s, 'w'))
                    except IOError, ioe:
                        raise error.general("creating log file '" + s + \
                                            "': " + str(ioe))
    def __del__(self):
        # Close only the handles we opened ourselves (index 2+), never
        # the borrowed stdout/stderr slots.
        for f in range(2, len(self.fhs)):
            self.fhs[f].close()
    def __str__(self):
        # Render the tail buffer as one string without a trailing newline.
        t = ''
        for tl in self.tail:
            t += tl + os.linesep
        return t[:-len(os.linesep)]
    def _tail(self, text):
        """Append *text* (string or list of lines) to the bounded tail buffer."""
        if type(text) is not list:
            text = text.splitlines()
        self.tail += text
        if len(self.tail) > self.tail_size:
            self.tail = self.tail[-self.tail_size:]
    def has_stdout(self):
        """True when this log writes to stdout."""
        return self.fhs[0] is not None
    def has_stderr(self):
        """True when this log writes to stderr."""
        return self.fhs[1] is not None
    def output(self, text):
        """Output the text message to all the logs."""
        # Reformat the text to have local line types.
        text = text.replace(chr(13), '').splitlines()
        self._tail(text)
        out = ''
        for l in text:
            out += l + os.linesep
        for f in range(0, len(self.fhs)):
            if self.fhs[f] is not None:
                self.fhs[f].write(out)
        self.flush()
    def flush(self):
        """Flush the output."""
        for f in range(0, len(self.fhs)):
            if self.fhs[f] is not None:
                self.fhs[f].flush()
# Self-test / demo: exercises the log class, the tail buffer, and the
# quiet/tracing global switches.  Python 2 only (print statements).
if __name__ == "__main__":
    l = log(['stdout', 'log.txt'], tail_size = 20)
    for i in range(0, 10):
        l.output('log: hello world: %d\n' % (i))
    l.output('log: hello world CRLF\r\n')
    l.output('log: hello world NONE')
    l.flush()
    print '=-' * 40
    print 'tail: %d' % (len(l.tail))
    print l
    print '=-' * 40
    for i in range(0, 10):
        l.output('log: hello world 2: %d\n' % (i))
    l.flush()
    print '=-' * 40
    print 'tail: %d' % (len(l.tail))
    print l
    print '=-' * 40
    # Two passes: the first with no default log (plain stdout), the second
    # after 'default = l' below routes trace/notice through the log object.
    for i in [0, 1]:
        quiet = False
        tracing = False
        print '- quiet:%s - trace:%s %s' % (str(quiet), str(tracing), '-' * 30)
        trace('trace with quiet and trace off')
        notice('notice with quiet and trace off')
        quiet = True
        tracing = False
        print '- quiet:%s - trace:%s %s' % (str(quiet), str(tracing), '-' * 30)
        trace('trace with quiet on and trace off')
        notice('notice with quiet on and trace off')
        quiet = False
        tracing = True
        print '- quiet:%s - trace:%s %s' % (str(quiet), str(tracing), '-' * 30)
        trace('trace with quiet off and trace on')
        notice('notice with quiet off and trace on')
        quiet = True
        tracing = True
        print '- quiet:%s - trace:%s %s' % (str(quiet), str(tracing), '-' * 30)
        trace('trace with quiet on and trace on')
        notice('notice with quiet on and trace on')
        default = l
    print '=-' * 40
    print 'tail: %d' % (len(l.tail))
    print l
    print '=-' * 40
    del l
| [
"chrisj@rtems.org"
] | chrisj@rtems.org |
60b32cb26774747a827fb99ec828f7caeb007fd8 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_86/264.py | 47c4d9f5b59ad9d3d7dc6bc03f717392097e26ce | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | #!/usr/bin/env python
def solves(n, freqs):
    """Return True when every frequency either divides n, is divided by n, or is 1."""
    return all(a % n == 0 or n % a == 0 or a == 1 for a in freqs)
def print_solution(freqs, n, l, h):
    """Return (as a string) the first value in [l, h] accepted by solves(), or 'NO'."""
    candidate = l
    while candidate <= h:
        if solves(candidate, freqs):
            return str(candidate)
        candidate += 1
    return 'NO'
def main():
    """Read T test cases (each: n l h on one line, frequencies on the next)
    and print the Code Jam style answer line for each.

    Python 2 only: raw_input/xrange/print statement.
    """
    T = int(raw_input())
    for i in xrange(T):
        n, l, h = map(int, raw_input().split(' '))
        freqs = map(int, raw_input().split(' '))
        print 'Case #{0}: {1}'.format(i+1, print_solution(freqs, n, l, h))
if __name__ == '__main__':
    main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e67bf271c819e68a36805a80819c5935dc3a5ec8 | 946062524e1995a33cc9de01dc1766da27cba16b | /py_ad_3_3.py | f0ed5c35114fb0d58b178c1b4dcdf44ee11d0342 | [
"MIT"
] | permissive | seopbo/con-par-python | 87f684332d6a72969bb5d1759143d7374c2454b1 | e74cb9c30acfdd78c12c9f7aba039d16ed1f7e78 | refs/heads/main | 2023-05-29T03:27:33.564631 | 2021-06-16T12:51:44 | 2021-06-16T12:51:44 | 372,824,317 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | """
Section 3
Concurrency, CPU Bound vs I/O Bound - Multiprocessing vs Threading vs AsyncIO
Keyword - CPU Bound, I/O Bound, AsyncIO
"""
"""
CPU Bound vs I/O Bound
CPU Bound
- 프로세스 진행 -> CPU 속도에 의해 제한(결정) -> 행렬 곱, 고속 연산, 압축 파일, 집합 연산 등
- CPU 연산 위주 작업
I/O Bound
- 파일쓰기, 디스크 작업, 네트워크 통신, 시리얼 포트 송수신 -> 작업에 의해서 병목(수행시간)이 결정
- CPU 성능 지표가 수행시간 단축으로 크게 영향을 끼치지 않음.
메모리 바인딩, 캐시 바운딩
작업 목적에 따라서 적절한 동시성 라이브러리 선택이 중요함.
최종 비교
- Multiprocessing: Multiple processes, 고가용성(CPU) Utilization -> CPU-Bound -> 10개 부엌, 10명 요리사, 10개 요리
- Threading: Single(Multi) process, Multiple threads, OS decides task switching -> Fast I/O Bound -> 1개 부엌, 10명 요리사, 10개 요리
- AsyncIO: Single process, single thread, cooperative multitasking, tasks cooperatively decide switching -> Slow I/O-Bound -> 1개 1부엌, 1명 요리사, 10개 요리
""" | [
"bsk0130@gmail.com"
] | bsk0130@gmail.com |
14433120dd972d49171446aa029b1e37702465bc | 46afa2d7d3c531c898d1ade4f9e8f896661b8db3 | /interpolate.py | 8ce445555f6323405a6d9072f48bf3612941f121 | [
"Apache-2.0"
] | permissive | Guymer/PyGuymer | b07062e9464ec134e1c83122ee1734d8bff6b4ad | 7970659645f363788d371d00e2128f0cc3a47362 | refs/heads/master | 2021-01-19T13:19:39.797501 | 2020-11-28T08:26:27 | 2020-11-28T08:26:27 | 82,384,393 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | # -*- coding: utf-8 -*-
##############################################################################################
# This file is deprecated because Python 2.x is deprecated #
# A Python 3.x version of this file can be found at: #
# #
# https://github.com/Guymer/PyGuymer3/blob/master/interpolate.py #
##############################################################################################
def interpolate(x1, x2, y1, y2, x):
    """Linearly interpolate between the points (x1, y1) and (x2, y2) at position x."""
    weight_left = x2 - x
    weight_right = x - x1
    return (y1 * weight_left + y2 * weight_right) / (x2 - x1)
| [
"t.m.guymer@thomasguymer.co.uk"
] | t.m.guymer@thomasguymer.co.uk |
6de16e882d54b189cd4432684fe408fb63810100 | 162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d | /trex/widgets/variableexplorer/tests/test_utils.py | f441a93f49aee22028a0bd2c3efe39bcbaa31376 | [] | no_license | testsleeekGithub/trex | 2af21fa95f9372f153dbe91941a93937480f4e2f | 9d27a9b44d814ede3996a37365d63814214260ae | refs/heads/master | 2020-08-01T11:47:43.926750 | 2019-11-06T06:47:19 | 2019-11-06T06:47:19 | 210,987,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | # -*- coding: utf-8 -*-
#
# Copyright © TRex Project Contributors
# Licensed under the terms of the MIT License
"""
Tests for utils.py
"""
# Third party imports
import pytest
# Local imports
from trex.widgets.variableexplorer.utils import sort_against
# --- Tests
# -----------------------------------------------------------------------------
def test_sort_against():
    """sort_against must reorder the first list by the values in the second."""
    values = [5, 6, 7]
    keys = [2, 3, 1]
    assert sort_against(values, keys) == [7, 5, 6]
def test_sort_against_is_stable():
    """Equal keys must preserve the original ordering (stable sort)."""
    values = [3, 0, 1]
    keys = [1, 1, 1]
    assert sort_against(values, keys) == values
if __name__ == "__main__":
pytest.main()
| [
"shkolanovaya@gmail.com"
] | shkolanovaya@gmail.com |
3c41a57cd18183f8d1d971ce71b94d38243f5a85 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/cdn/v20161002/get_profile_supported_optimization_types.py | c7c51e915af6ec616d256f41b35028165f4f7fb8 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,758 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
# Public API of this generated module.
__all__ = [
    'GetProfileSupportedOptimizationTypesResult',
    'AwaitableGetProfileSupportedOptimizationTypesResult',
    'get_profile_supported_optimization_types',
]
# NOTE: generated by the Pulumi SDK Generator (see file header) -- keep edits minimal.
@pulumi.output_type
class GetProfileSupportedOptimizationTypesResult:
    """
    The result of the GetSupportedOptimizationTypes API
    """
    def __init__(__self__, supported_optimization_types=None):
        # Validate then stash the value in pulumi's internal attribute store.
        if supported_optimization_types and not isinstance(supported_optimization_types, list):
            raise TypeError("Expected argument 'supported_optimization_types' to be a list")
        pulumi.set(__self__, "supported_optimization_types", supported_optimization_types)
    @property
    @pulumi.getter(name="supportedOptimizationTypes")
    def supported_optimization_types(self) -> Optional[Sequence[str]]:
        """
        Supported optimization types for a profile.
        """
        return pulumi.get(self, "supported_optimization_types")
class AwaitableGetProfileSupportedOptimizationTypesResult(GetProfileSupportedOptimizationTypesResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead 'if False: yield' turns this method into a generator so the
        # result object can be awaited; it immediately returns the plain result.
        if False:
            yield self
        return GetProfileSupportedOptimizationTypesResult(
            supported_optimization_types=self.supported_optimization_types)
def get_profile_supported_optimization_types(profile_name: Optional[str] = None,
                                             resource_group_name: Optional[str] = None,
                                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProfileSupportedOptimizationTypesResult:
    """
    The result of the GetSupportedOptimizationTypes API


    :param str profile_name: Name of the CDN profile which is unique within the resource group.
    :param str resource_group_name: Name of the Resource group within the Azure subscription.
    """
    # Marshal arguments into the provider's camelCase wire names.
    __args__ = dict()
    __args__['profileName'] = profile_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; .value is the raw result payload.
    __ret__ = pulumi.runtime.invoke('azure-native:cdn/v20161002:getProfileSupportedOptimizationTypes', __args__, opts=opts, typ=GetProfileSupportedOptimizationTypesResult).value
    return AwaitableGetProfileSupportedOptimizationTypesResult(
        supported_optimization_types=__ret__.supported_optimization_types)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
fb337d2f93cb7bc1c230ac5ee85b0ae6153d1a2b | 673bf701a310f92f2de80b687600cfbe24612259 | /misoclib/mem/litesata/test/scrambler_tb.py | 20c4045c45769c09a380c0ad879bb473df375f0a | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mogorman/misoc | d78340a9bf67feaede20e8cac473bcfddbd186a3 | 4ec49e2aadcff0c3ca34ebd0d35013d88f4d3e1f | refs/heads/master | 2021-01-18T05:38:39.670977 | 2015-03-10T05:37:52 | 2015-03-10T05:37:52 | 30,672,191 | 1 | 0 | null | 2015-02-11T22:05:05 | 2015-02-11T22:05:05 | null | UTF-8 | Python | false | false | 1,291 | py | import subprocess
from misoclib.mem.litesata.common import *
from misoclib.mem.litesata.core.link.scrambler import *
from misoclib.mem.litesata.test.common import *
class TB(Module):
    """Migen testbench comparing the scrambler RTL against a C reference model."""
    def __init__(self, length):
        # InsertReset wraps the scrambler so the testbench can pulse 'reset'.
        self.submodules.scrambler = InsertReset(Scrambler())
        self.length = length
    def get_c_values(self, length):
        """Run the reference implementation and return its values as ints.

        NOTE(review): expects an executable named './scrambler' in the current
        working directory that reads a hex count on stdin and prints one hex
        value per line.
        """
        stdin = "0x%08x" %length
        with subprocess.Popen("./scrambler", stdin=subprocess.PIPE, stdout=subprocess.PIPE) as process:
            process.stdin.write(stdin.encode("ASCII"))
            out, err = process.communicate()
        # Drop the trailing empty element produced by the final newline.
        return [int(e, 16) for e in out.decode("ASCII").split("\n")[:-1]]
    def gen_simulation(self, selfp):
        """Simulation generator: each 'yield' advances one clock cycle."""
        # init CRC
        selfp.scrambler.ce = 1
        selfp.scrambler.reset = 1
        yield
        selfp.scrambler.reset = 0
        # log results
        yield
        sim_values = []
        for i in range(self.length):
            sim_values.append(selfp.scrambler.value)
            yield
        # stop
        selfp.scrambler.ce = 0
        for i in range(32):
            yield
        # get C code reference
        c_values = self.get_c_values(self.length)
        # check results
        s, l, e = check(c_values, sim_values)
        print("shift "+ str(s) + " / length " + str(l) + " / errors " + str(e))
print("shift "+ str(s) + " / length " + str(l) + " / errors " + str(e))
if __name__ == "__main__":
from migen.sim.generic import run_simulation
length = 8192
run_simulation(TB(length), ncycles=length+100, vcd_name="my.vcd")
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
f0d1d5adff2ca0f3f278aaf8ad723ec45be24682 | d28a65d23c204a9736b597ae510d9dd54d2ffd0f | /tests/testRF2Namespace.py | 9c77dd12cc6eaf93924009005bcb02f414c7cd61 | [
"BSD-3-Clause"
] | permissive | cts2/rf2db | 99ba327611e620fc5533245064afcc1daff7c164 | 985cd7ad84c8907306a0d7d309d4a1c0fb422ba4 | refs/heads/master | 2020-05-17T22:37:25.476553 | 2015-08-24T22:18:19 | 2015-08-24T22:18:19 | 15,264,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,835 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from rf2db.db.RF2Namespaces import RF2Namespace, DecodedNamespace
from rf2db.utils.sctid_generator import sctid_generator, CIMI_Namespace
from SetConfig import setConfig
from ClearConfig import clearConfig
class RF2NamespaceTestCase(unittest.TestCase):
    """Unit tests for RF2Namespace id generation and DecodedNamespace parsing."""
    def setUp(self):
        setConfig()
    def tearDown(self):
        clearConfig()
    def nsTest(self, ns):
        """Generate concept/relationship/description ids for *ns* and verify
        that decoding each id recovers the namespace and partition, and that
        consecutive concept ids have consecutive item numbers."""
        x = RF2Namespace(ns)
        dns = DecodedNamespace(x.nextConceptId())
        self.assertEqual(ns, dns.namespace)
        self.assertEqual(sctid_generator.CONCEPT._lid, dns.partition)
        base = dns.item
        dns2 = DecodedNamespace(x.nextConceptId())
        self.assertEqual(ns, dns2.namespace)
        self.assertEqual(sctid_generator.CONCEPT._lid, dns2.partition)
        self.assertEqual(dns2.item, base+1)
        dns = DecodedNamespace(x.nextRelationshipId())
        self.assertEqual(ns, dns.namespace)
        self.assertEqual(sctid_generator.RELATIONSHIP._lid, dns.partition)
        dns = DecodedNamespace(x.nextDescriptionId())
        self.assertEqual(ns, dns.namespace)
        self.assertEqual(sctid_generator.DESCRIPTION._lid, dns.partition)
    def test_CIMI(self):
        # Exercise both the well-known CIMI namespace and an arbitrary one.
        self.nsTest(CIMI_Namespace)
        self.nsTest(100087)
    def test_strNS(self):
        # Namespaces supplied as strings must round-trip via str().
        ns = "1000087"
        x = RF2Namespace(ns)
        dns = DecodedNamespace(x.nextConceptId())
        self.assertEqual(ns, str(dns.namespace))
        self.assertEqual(sctid_generator.CONCEPT._lid, dns.partition)
    def testDecodedNamespace(self):
        # Hand-computed SCTID decompositions: check digit, partition, item, namespace.
        dns1 = DecodedNamespace(101291013)
        self.assertEqual(3, dns1.checkdigit)
        self.assertEqual(1, dns1.partition)
        self.assertEqual(101291, dns1.item)
        self.assertEqual(0, dns1.namespace)
        dns1 = DecodedNamespace(101291111)
        self.assertEqual(1, dns1.checkdigit)
        self.assertEqual(11, dns1.partition)
        self.assertEqual(101291, dns1.namespace)
        self.assertEqual(0, dns1.item)
        dns1 = DecodedNamespace(999999990989121104)
        self.assertEqual(4, dns1.checkdigit)
        self.assertEqual(10, dns1.partition)
        self.assertEqual(99999999, dns1.item)
        self.assertEqual(989121, dns1.namespace)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"solbrig.harold@mayo.edu"
] | solbrig.harold@mayo.edu |
6a2ffbb56766a47705ad8233ad60ec4b258bb9d7 | dac7095e7b5ad4dae993871c1ae45cbb7a5ce5f7 | /Character/25.Milim/Milim_O.py | 8c2045b511085f6a3595ca72e7b3ac72960686d2 | [] | no_license | Lastation/RenewalAniChaos | d12a8423f4b83cb019495c59ed059451e67e0483 | c3edb29af58925de55c11110ccaf927d2b5d1b39 | refs/heads/master | 2023-08-24T11:28:35.614844 | 2023-08-22T21:23:14 | 2023-08-22T21:23:14 | 246,617,812 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,766 | py | import Function as f;
const s = StringBuffer();
function main(cp)
{
if (f.count[cp] == 1)
{
MoveLocation("25.Milim_Bozo", f.heroID[cp], cp, "Anywhere");
}
f.HoldPosition(cp);
f.BanReturn(cp);
if (f.delay[cp] == 0)
{
if (f.count[cp] == 0)
{
if (f.loop[cp] == 0)
{
CreateUnit(1, "Flame Blue", "[Skill]Unit_Wait_8", cp);
SetInvincibility(Enable, "Any unit", cp, "[Skill]Unit_Wait_ALL");
MoveLocation(f.location[cp], f.heroID[cp], cp, "Anywhere");
MoveUnit(All, "Flame Blue", cp, "Anywhere", f.location[cp]);
MoveLocation("25.Milim_Bozo", "Flame Blue", cp, "Anywhere");
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] < 60)
{
var x = 50;
if (cp >= 3) x = -x;
f.DotShape(cp, 8, "40 + 1n Zealot", 0, 0);
KillUnitAt(All, "40 + 1n Zealot", "Anywhere", cp);
addloc("25.Milim_Bozo", x * 3, 0);
MoveUnit(All, "Flame Blue", cp, "Anywhere", "25.Milim_Bozo");
CreateUnit(3, "40 + 1n Wraith", "[Skill]Unit_Wait_8", cp);
SetInvincibility(Enable, "Any unit", cp, "[Skill]Unit_Wait_ALL");
MoveLocation(f.location[cp], "Flame Blue", cp, "Anywhere");
MoveUnit(1, "40 + 1n Wraith", cp, "[Skill]Unit_Wait_ALL", f.location[cp]);
addloc(f.location[cp], -x, 0);
MoveUnit(1, "40 + 1n Wraith", cp, "[Skill]Unit_Wait_ALL", f.location[cp]);
addloc(f.location[cp], -x, 0);
MoveUnit(1, "40 + 1n Wraith", cp, "[Skill]Unit_Wait_ALL", f.location[cp]);
KillUnitAt(All, "40 + 1n Wraith", "Anywhere", cp);
if ((cp >= 3 && (Bring(cp, AtLeast, 1, "Flame Blue", "[Potal]Shop7") || Bring(cp, AtLeast, 1, "Flame Blue", "[Potal]Potal7")))
|| (cp < 3 && (Bring(cp, AtLeast, 1, "Flame Blue", "[Potal]Shop8") || Bring(cp, AtLeast, 1, "Flame Blue", "[Potal]Potal8"))))
{
SetDeaths(cp, SetTo, 120, " `UniqueCoolTime");
f.SkillWait(cp, 80);
f.count[cp] = 2;
f.loop[cp] = 0;
}
else if (cp < 3)
{
if (Bring(P8, AtLeast, 1, "Buildings", "25.Milim_Bozo"))
{
SetSwitch("Unique - MilimWarning", Set);
SetSwitch("Recall - Milim", Set);
f.Voice_Routine(cp, 3);
f.SkillWait(cp, 80);
f.count[cp] += 1;
f.loop[cp] = 0;
}
else
{
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
}
else if (cp >= 3)
{
if (Bring(P7, AtLeast, 1, "Buildings", "25.Milim_Bozo"))
{
SetSwitch("Unique - MilimWarning", Set);
SetSwitch("Recall - Milim", Set);
f.Voice_Routine(cp, 3);
f.SkillWait(cp, 80);
f.count[cp] += 1;
f.loop[cp] = 0;
}
else
{
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
}
}
else if (f.loop[cp] == 60)
{
SetDeaths(cp, SetTo, 120, " `UniqueCoolTime");
f.SkillWait(cp, 80);
f.count[cp] = 2;
f.loop[cp] = 0;
}
}
else if (f.count[cp] == 1)
{
if (f.loop[cp] < 40)
{
f.EdgeShape(cp, 1, "50 + 1n Tank", 0, 7, 120);
if (f.loop[cp] % 2 == 0)
{
f.EdgeShape(cp, 1, "Protoss Dark Archon", 0, 3, 40);
}
else if (f.loop[cp] % 2 == 1)
{
f.EdgeShape(cp, 1, "Protoss Dark Archon", 0, 5, 80);
}
KillUnitAt(All, "Protoss Dark Archon", "Anywhere", cp);
KillUnitAt(All, "50 + 1n Tank", "Anywhere", cp);
KillUnitAt(All, "60 + 1n High Templar", "Anywhere", cp);
KillUnitAt(All, "Rhynadon (Badlands)", "Anywhere", cp);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 40)
{
MoveLocation("25.Milim_Bozo", "Flame Blue", cp, "Anywhere");
if (Deaths(CurrentPlayer, Exactly, 0, (210)))
{
MoveUnit(All, f.heroID[cp], cp, "Anywhere", "25.Milim_Bozo");
CenterView("25.Milim_Bozo");
}
f.NxNSquareShape(cp, 1, "130 + 1n Norad", 3, 75);
f.DotShape(cp, 16, "80 + 1n Goliath", 0, 0);
Order("130 + 1n Norad", cp, "Anywhere", Attack, "Anywhere");
MoveUnit(All, "80 + 1n Goliath", cp, "[Skill]Unit_Wait_ALL", f.location[cp]);
Order("80 + 1n Goliath", cp, "Anywhere", Attack, "Anywhere");
SetSwitch("Recall - Milim", Clear);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] < 45)
{
var i = f.loop[cp] - 41;
f.EdgeShape(cp, 1, "60 + 1n Siege", 0, 5 + 2 * i, 100 + 50 * i);
f.EdgeShape(cp, 1, "50 + 1n Battlecruiser", 0, 3 + 2 * i, 50 + 50 * i);
KillUnitAt(All, "60 + 1n Siege", "Anywhere", cp);
KillUnitAt(All, "50 + 1n Battlecruiser", "Anywhere", cp);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 45)
{
KillUnitAt(All, "130 + 1n Norad", "Anywhere", cp);
KillUnitAt(All, "80 + 1n Goliath", "Anywhere", cp);
f.EdgeShape(cp, 1, " Unit. Hoffnung 25000", 0, 3, 50);
f.EdgeShape(cp, 1, " Unit. Hoffnung 25000", 0, 5, 100);
f.EdgeShape(cp, 1, " Unit. Hoffnung 25000", 0, 7, 150);
f.EdgeShape(cp, 1, " Unit. Hoffnung 25000", 0, 9, 150);
KillUnitAt(All, " Unit. Hoffnung 25000", "Anywhere", cp);
f.SkillWait(cp, 80);
f.loop[cp] += 1;
}
else if (f.loop[cp] == 46)
{
f.Voice_Routine(cp, 4);
SetSwitch("Unique - Milim", Set);
SetDeaths(cp, SetTo, 2880, " `UniqueCoolTime");
SetDeaths(cp, SetTo, 720, " `UniqueSkill");
f.SkillWait(cp, 80);
f.count[cp] += 1;
f.loop[cp] = 0;
}
}
else if (f.count[cp] == 2)
{
RemoveUnitAt(All, "Flame Blue", "Anywhere", cp);
SetSwitch("Unique - MilimWarning", Clear);
f.SkillEnd(cp);
}
}
} | [
"ghtjd000129@naver.com"
] | ghtjd000129@naver.com |
80bb3d72b5be850cf02eca31d0d3c0b58fe08313 | 3d3f629105b0a350c011976cae02cb10b385d873 | /keras_180112/klab-10-2-mnist_nn.py | 36d1418056f2ff3caacf1143ea13e15413242fbf | [] | no_license | vt0311/acorn_tensor | fbe7a9507db15161f029f297df64bfe0c937764f | eb9d94c5f28d673b82becb31abe6640cbd18cf89 | refs/heads/master | 2021-09-07T00:32:33.819768 | 2018-02-14T07:45:06 | 2018-02-14T07:45:06 | 114,960,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,238 | py | from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
batch_size = 128
num_classes = 10
epochs = 12
# ==============================================================================
# prepare data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Flatten the 28x28 images into 784-element vectors and scale to [0, 1].
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# One-hot encode the digit labels for categorical crossentropy.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# ==============================================================================
# build model
# (model code from http://iostream.tistory.com/111)
model = Sequential()
# glorot_uniform : Xavier uniform initializer.
model.add(Dense(256, input_dim=784,
                kernel_initializer='glorot_uniform', activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(256, kernel_initializer='glorot_uniform', activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(256, kernel_initializer='glorot_uniform', activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(256, kernel_initializer='glorot_uniform', activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam', metrics=['accuracy'])
# The fit method:
# callbacks parameter: functions to apply around the fitting process
# validation_split: fraction of the data withheld from training (0.0 <= ratio <= 1.0)
history = model.fit(X_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    #validation_split=0.2)
                    validation_split=0.3)
# ==============================================================================
# predict
score = model.evaluate(X_test, y_test, batch_size=batch_size)
print('\nTest loss:', score[0])
print('Test accuracy:', score[1])
# Recorded result of a previous run:
'''
Test loss: 0.0742975851574
Test accuracy: 0.9811
'''
| [
"hsw0311@nate.com"
] | hsw0311@nate.com |
9e4713c9b200c4824571edd9a6992ce2fe3ae3a1 | f707303e4dfe383cf82c23a6bb42ccfdc4cfdb67 | /pandas-ml-quant-rl/pandas_ml_quant_rl/renderer/wrappers.py | 3fb96c84205231b00a6dece0e609e87c5eba87bc | [
"MIT"
] | permissive | jcoffi/pandas-ml-quant | 1830ec256f8c09c04f1aa77e2eecfba07d34fe68 | 650a8e8f77bc4d71136518d1c7ee65c194a99cf0 | refs/heads/master | 2023-08-31T06:45:38.060737 | 2021-09-09T04:44:35 | 2021-09-09T04:44:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,233 | py | import time
from queue import Empty
from multiprocessing import Process, Event, Queue
from .abstract_renderer import Renderer
def render_frame(data_q: Queue, finish_e: Event, renderer_provider):
    """Worker-process loop: drain (state, action, ...) tuples from *data_q*
    and feed them to a renderer built by *renderer_provider*.

    The renderer is constructed inside the worker so it never crosses the
    process boundary.  When the queue is idle, the frame is still re-rendered
    and *finish_e* is polled so the loop can shut down.
    """
    renderer = renderer_provider()
    while True:
        try:
            old_state, action, new_state, reward, done = data_q.get(timeout=0.1)
            renderer.plot(old_state, action, new_state, reward, done)
            renderer.render()
        except Empty:
            renderer.render()
            if finish_e.wait(0.1):
                break
    print("shut down online rendering !!!")
class OnlineRenderer(Renderer):
    """Renderer facade that forwards frames to a background render process."""
    def __init__(self, renderer_provider):
        super().__init__()
        self.data_q = Queue()
        self.finish_e = Event()
        # The actual renderer lives in this child process (see render_frame).
        self.worker = Process(target=render_frame, args=(self.data_q, self.finish_e, renderer_provider))
        self.startup = True
    def plot(self, old_state, action, new_state, reward, done):
        self.data_q.put_nowait((old_state, action, new_state, reward, done))
        if self.startup:
            # Give the freshly started worker a moment to come up before
            # flooding it with frames.
            time.sleep(1)
            self.startup = False
    def stop(self):
        # Signals the worker loop to exit after its next idle poll.
        self.finish_e.set()
    def render(self, mode=None, min_time_step=1.0):
        # Lazily start (or restart-guard) the worker process.
        if not self.worker.is_alive():
            self.worker.start()
class MovieRenderer(Renderer):
    """Placeholder for a renderer that records frames to a video file.

    Not implemented yet; the __init__ docstring keeps a matplotlib/FFMpeg
    usage sketch for the eventual implementation.
    """
    def __init__(self, renderer_provider):
        """
        import numpy as np
        import matplotlib
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
        import matplotlib.animation as manimation

        FFMpegWriter = manimation.writers['ffmpeg']
        metadata = dict(title='Movie Test', artist='Matplotlib',
                        comment='Movie support!')
        writer = FFMpegWriter(fps=15, metadata=metadata)

        fig = plt.figure()
        l, = plt.plot([], [], 'k-o')

        plt.xlim(-5, 5)
        plt.ylim(-5, 5)

        x0, y0 = 0, 0

        with writer.saving(fig, "writer_test.mp4", 100):
            for i in range(100):
                x0 += 0.1 * np.random.randn()
                y0 += 0.1 * np.random.randn()
                l.set_data(x0, y0)
                writer.grab_frame()

        :param renderer_provider:
        """
        pass
"kic@kic.kic"
] | kic@kic.kic |
1c69983d542daa4862fc550a96058d269e269317 | d3b77550a40b860970450e702b6bcd28d5f9b3e4 | /LeetCode/top_interview_questions/SingleNumber.py | 946e7f04b71da2dfe1ffc065803ebed6b32acc7d | [] | no_license | CateGitau/Python_programming | 47bc9277544814ad853b44a88f129713f1a40697 | 6ae42b3190134c4588ad785d62e08b0763cf6b3a | refs/heads/master | 2023-07-08T03:08:46.236063 | 2021-08-12T09:38:03 | 2021-08-12T09:38:03 | 228,712,021 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | '''
Given a non-empty array of integers nums,
every element appears twice except for one.
Find that single one.
Follow up: Could you implement a solution with a
linear runtime complexity and without using extra memory?
'''
# Sample input used by the demo call at the bottom of the file.
nums = [2,2,1]
def singleNumber(num):
    """Return the element of *num* that appears exactly once.

    Every other element appears twice, so the unique element is the one
    with the minimum occurrence count.

    Bug fix: the original iterated over the module-level global ``nums``
    instead of the ``num`` parameter, so the argument was silently ignored.
    """
    countDict = {}
    for value in num:
        countDict[value] = countDict.get(value, 0) + 1
    return min(countDict, key=countDict.get)
# Demo: prints the unique element of the sample input.
print(singleNumber(nums))
"catherinegitau94@gmail.com"
] | catherinegitau94@gmail.com |
6fa21b8ade431f7c3d86510e7d9f2ccc1c8b4271 | efbe970cb374d4416c2c500a495994397ea18dd5 | /utils/db.py | 7a8018cd56adb2b0f4ac8c2920dbbc8cbbb04e43 | [
"MIT"
] | permissive | void-being/bepis-bot | f7d9fbc7663bb8a28c70e312fa4fb20c53c406c7 | 491b8de94b94384df6b26fa6a1325ee578020b7e | refs/heads/master | 2020-07-11T17:28:10.080879 | 2018-11-15T23:44:06 | 2018-11-15T23:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,845 | py | from datetime import datetime, timedelta
from logging import getLogger
from hashlib import md5
from os import environ
from uuid import uuid4
from threading import Thread
from queue import Queue
from pymongo import mongo_client
class EventHandler(Thread):
    """Background worker thread that serialises callables through a queue.

    do() posts a callable plus arguments and blocks on a per-call reply
    queue until run() has executed it, so all work funnelled through one
    EventHandler executes sequentially on its thread.
    NOTE(review): the thread is not a daemon and run() never exits, so the
    process will not terminate while an EventHandler is alive -- confirm
    this is intentional.
    """
    def __init__(self):
        self.q = Queue()
        super().__init__()
        # Starts the worker immediately on construction.
        self.start()
    def do(self, command, *args, **kwargs):
        """Run command(*args, **kwargs) on the worker thread and return its result."""
        cb = Queue()  # one-shot reply channel for this call
        self.q.put((cb, command, args, kwargs))
        return cb.get()
    def run(self):
        while True:
            cb, cmd, args, kwargs = self.q.get()
            cb.put(cmd(*args, **kwargs))
# Module-wide worker; constructing it starts the background thread immediately.
handler = EventHandler()
class BepisUser:
    """In-memory view of one user's profile document, kept in sync with MongoDB.

    Every mutation writes through to the profiles collection via the
    module-level ``handler`` worker thread before/while updating local state.
    """

    def __init__(self, name: str, master, contents: dict):
        self.master = master  # the pymongo 'profiles' collection
        self.logger = getLogger(name + "-" + str(contents['user_id']))
        self.user_id = contents['user_id']
        self.shibes = contents['shibes']
        self._bepis = contents['bepis']
        self._last_daily = contents['last_daily']
        # Older documents predate the powerups field; lazily migrate them.
        if "powerups" not in contents:
            handler.do(self.master.update_one, {"user_id": self.user_id}, {"$set": {"powerups": []}})
            self.powerups = []
        else:
            self.powerups = contents['powerups']

    @property
    def bepis(self):
        """Current bepis balance (assignment writes through to the database)."""
        return self._bepis

    @bepis.setter
    def bepis(self, value):
        handler.do(self.master.update_one, {"user_id": self.user_id}, {"$set": {"bepis": value}})
        self._bepis = value
        self.logger.debug("Updated bepis to: " + str(value))

    @property
    def last_daily(self):
        """Timestamp of the last daily claim (assignment writes through)."""
        return self._last_daily

    @last_daily.setter
    def last_daily(self, value):
        handler.do(self.master.update_one, {"user_id": self.user_id}, {"$set": {"last_daily": value}})
        self._last_daily = value
        self.logger.debug("Updated last_daily to now")

    def add_shibe(self, shibe_name: str, amount: int = None):
        """Increment the named shibe's count (or set it to *amount*) and persist."""
        for i, shibe in enumerate(self.shibes):
            if shibe[0] == shibe_name:
                shibe_amount = (shibe[1] + 1) if amount is None else amount
                self.shibes[i] = shibe_name, shibe_amount
                break
        else:
            self.shibes.append((shibe_name, (1 if amount is None else amount)))
        handler.do(self.master.update_one, {"user_id": self.user_id}, {"$set": {"shibes": self.shibes}})
        self.logger.debug("Added shibe: " + shibe_name)

    def remove_shibe(self, shibe_index: int):
        """Decrement the shibe at *shibe_index*, dropping the entry at zero, and persist."""
        shibe = self.shibes[shibe_index]
        new_count = shibe[1] - 1
        if not new_count:
            self.shibes.pop(shibe_index)
        else:
            self.shibes[shibe_index] = (shibe[0], new_count)
        handler.do(self.master.update_one, {"user_id": self.user_id}, {"$set": {"shibes": self.shibes}})
        self.logger.debug("Removed shibe: " + shibe[0])

    def add_powerup(self, *data):
        """Append a powerup record (name first) and persist the full list."""
        powerups = self.powerups.copy()
        powerups.append(data)
        handler.do(self.master.update_one, {"user_id": self.user_id}, {"$set": {"powerups": powerups}})
        # Bug fix: keep the in-memory list in sync (it previously went stale),
        # and log the powerup *name* rather than concatenating the whole tuple
        # to a string (which raised TypeError).
        self.powerups = powerups
        self.logger.debug("Added powerup: " + str(data[0]))

    def remove_powerup(self, name: str):
        """Remove the first removable powerup called *name* and persist the list.

        Raises ValueError when no matching powerup exists (previously the
        loop fell through and the last element was removed instead).
        """
        for powerup in self.powerups:
            if powerup[0] == name and powerup[1] is not None:
                break
        else:
            raise ValueError("no removable powerup named " + name)
        self.powerups.remove(powerup)
        # Bug fix: persist the remaining *list*, not the removed element.
        handler.do(self.master.update_one, {"user_id": self.user_id}, {"$set": {"powerups": self.powerups}})
        self.logger.debug("Removed powerup: " + powerup[0])
class Database(EventHandler):
    """Profile store backed by the 'bepis_bot.profiles' MongoDB collection."""
    def __init__(self, name: str):
        self.logger = getLogger(name + "-database")
        self.client = mongo_client.MongoClient(environ["MONGO_URI"])
        self.profiles = self.client['bepis_bot']['profiles']
        # One document per Discord user id.
        self.profiles.create_index("user_id", unique=True)
        super().__init__()
    def create_user(self, user):
        """Insert a fresh profile for *user* and return its BepisUser wrapper."""
        payload = {
            "user_id": user.id,
            "bepis": 0,
            "shibes": [],
            # Backdated so the first daily claim is immediately available.
            "last_daily": datetime.now() - timedelta(days=1),
            "invite_url": None
        }
        handler.do(self.profiles.insert_one, payload)
        self.logger.debug("Created User: " + str(user.id))
        return BepisUser(self.logger.name, self.profiles, payload)
    def find_user(self, user_id: int):
        """Return the BepisUser for *user_id*, or None (implicitly) when absent."""
        prof = handler.do(self.profiles.find_one, {"user_id": user_id})
        if prof:
            self.logger.debug("Found user: " + str(user_id))
            return BepisUser(self.logger.name, self.profiles, prof)
        self.logger.debug("Could not find user: " + str(user_id))
class InviteDatabase(EventHandler):
    """Tracks invite codes and which user registered each one."""

    def __init__(self):
        self.logger = getLogger("InviteDatabase")
        self.client = mongo_client.MongoClient(environ["MONGO_URI"])
        self.invites = self.client['bepis_bot']['invites']
        self.profiles = self.client['bepis_bot']['profiles']
        super().__init__()

    def already_joined(self, member):
        """Return True when `member` already has a profile document."""
        profile = handler.do(self.profiles.find_one, {"user_id": member.user.id})
        if profile is None:
            self.logger.debug("Checking join on {0} (hasn't joined)".format(member.user.id))
            return False
        self.logger.debug("Checking join on {0} (has joined)".format(member.user.id))
        return True

    def register_invite(self, invite_code: str, user_id: int):
        """Record that `user_id` owns `invite_code`."""
        handler.do(self.invites.insert_one, {
            "invite_code": invite_code,
            "user_id": user_id
        })
        self.logger.debug("Created invite reg for {0}, invite: {1}".format(user_id, invite_code))

    def __iter__(self):
        """Yield every stored invite document."""
        yield from self.invites.find({})

    def remove_invite(self, invite_code: str):
        """Delete the registration for `invite_code`."""
        handler.do(self.invites.delete_one, {"invite_code": invite_code})
        self.logger.debug("Removed invite: {0}".format(invite_code))
class CodeDatabase:
    """One-shot redeemable codes, stored as md5(code) -> value."""

    def __init__(self):
        self.logger = getLogger("CodeDatabase")
        self.client = mongo_client.MongoClient(environ["MONGO_URI"])
        self.codes = self.client['bepis_bot']['codes']
        self.codes.create_index("hash", unique=True)

    def create_code(self, value: str):
        """Mint a new uppercase UUID code worth `value`; store only its hash."""
        code = str(uuid4()).upper()
        digest = md5(code.encode()).hexdigest()
        handler.do(self.codes.insert_one, {"hash": digest,
                                           "value": value})
        return code

    def activate_code(self, code: str):
        """Redeem `code`: delete it and return its value, or None if unknown."""
        digest = md5(code.encode()).hexdigest()
        # NOTE(review): unlike the rest of the file these calls bypass
        # handler.do -- presumably intentional; confirm.
        match = self.codes.find_one({"hash": digest})
        if not match:
            return None
        self.codes.delete_one({"hash": digest})
        return match['value']
class LotteryDatabase:
    """Single-lottery state: one LOTTERY document plus one USER doc per entrant."""

    def __init__(self):
        self.logger = getLogger("LotteryDatabase")
        self.client = mongo_client.MongoClient(environ["MONGO_URI"])
        self.lottery = self.client['bepis_bot']['lottery']

    def start_lottery(self, value, length=(12 * 60 * 60), price=10):
        """Wipe any previous lottery and open a new one worth `value`."""
        handler.do(self.lottery.delete_many, {})
        handler.do(self.lottery.insert_one, {
            "type": "LOTTERY",
            "start_time": datetime.now(),
            "length": length,
            "price": price,
            "value": value
        })

    def add_tickets(self, id: str, amount: int):
        """Add `amount` tickets for user `id`, creating their entry if needed."""
        existing = handler.do(self.lottery.find_one, {"user_id": id})
        if existing:
            handler.do(self.lottery.update_one, {"user_id": id},
                       {"$set": {"amount": existing["amount"] + amount}})
        else:
            handler.do(self.lottery.insert_one, {
                "type": "USER",
                "user_id": id,
                "amount": amount
            })

    def get_event(self):
        """Return the active LOTTERY document, if any."""
        return handler.do(self.lottery.find_one, {"type": "LOTTERY"})

    def get_users(self):
        """Return a cursor over all USER (entrant) documents."""
        return handler.do(self.lottery.find, {"type": "USER"})
| [
"zwork101@gmail.com"
] | zwork101@gmail.com |
386fd37b61525c2096f4a65d5ec83bd686be6528 | d68ca034018d66f73024223d4b2266b3c3c901d7 | /prev/myworks/lennys/chainxy/spiders/lennys.py | 16da51425667364402f6e451a3adb30645e65cd6 | [] | no_license | GoodyIT/scrapy | caff30d26660f778008ad50532e364ab36aba4c2 | 5ae80cf83dc62c4e1bd2bfa11049ca39a3ca3488 | refs/heads/master | 2021-01-20T10:10:18.365194 | 2017-07-22T13:09:07 | 2017-07-22T13:09:07 | 90,330,210 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,527 | py | import scrapy
import json
import csv
from scrapy.spiders import Spider
from scrapy.http import FormRequest
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from chainxy.items import ChainItem
import pdb
class Lennys(scrapy.Spider):
    """Spider for lennys.com store locations.

    NOTE(review): this spider looks unfinished -- parse_store_contents reads
    many response.meta keys that parse_state never sets; flagged inline.
    """
    name = "lennys"
    domain = "http://www.lennys.com/"
    store_id = []

    def start_requests(self):
        """Kick off the crawl at the locations page with browser-like headers."""
        yield scrapy.Request(url='https://www.lennys.com/locations/', headers={'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36', 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Cookie':'CFID=59662906; CFTOKEN=31653469; _gat=1; _ga=GA1.2.1230111863.1493822037; _gid=GA1.2.1456468815.1493823274'}, callback=self.parse_state)

    def parse_state(self, response):
        """Follow every image-map <area> link to a store detail page."""
        # fix: removed leftover pdb.set_trace(); use '//area' -- the original
        # relative 'area' matched nothing at the document root.
        for state in response.xpath('//area'):
            # fix: '@href/text()' is invalid XPath (attributes have no text()
            # child); extract the attribute value directly.
            href = state.xpath('.//@href').extract_first()
            if href:
                url = "http://www.lennys.com" + href
                yield scrapy.Request(url=url, callback=self.parse_store_contents)

    def parse_store_contents(self, response):
        """Build a ChainItem from a store detail page."""
        # fix: removed leftover pdb.set_trace().
        store = response.xpath("//div[contains(@class, 'contact_information BasicInfo-BS')]")
        meta = response.meta
        item = ChainItem()
        item['store_name'] = ''
        # TODO(review): none of these meta keys are set by parse_state, so the
        # original raised KeyError on every page; .get() avoids the crash but
        # the request chain still needs to be wired to pass this data.
        item['store_number'] = meta.get("store_number", '')
        address = store.xpath("//p[contains(@class, 'Address')]/text()").extract()
        item['address'] = meta.get("address", '')
        item['address2'] = ''
        item['phone_number'] = meta.get("phone_number", '')
        item['city'] = meta.get("city", '')
        item['state'] = meta.get("state", '')
        item['country'] = 'United States'
        item['latitude'] = meta.get("lat", '')
        item['longitude'] = meta.get("lng", '')
        # fix: `location` was never defined (guaranteed NameError); presumably
        # the zip code should also come from meta -- confirm the data source.
        item['zip_code'] = meta.get("zip_code", '')
        item['store_hours'] = self.validate(store.xpath(".//dd/text()"))
        #item['store_type'] = info_json["@type"]
        item['other_fields'] = ""
        item['coming_soon'] = "0"
        yield item

    def validate(self, xpath_obj):
        """Return the stripped first match of `xpath_obj`, or '' when absent."""
        try:
            return xpath_obj.extract_first().strip()
        except AttributeError:  # extract_first() returned None -> no .strip()
            return ""
| [
"johnsondavid489@yahoo.com"
] | johnsondavid489@yahoo.com |
52d9885475a54a1a940aad89d53e0ad69b4ede67 | 3dbc74df3a97ce8254935a08608e8a7966a1bba6 | /math/0x00-linear_algebra/12-bracin_the_elements.py | eb3480dcce06fe9737c77707f4fb45cfa3363dd2 | [] | no_license | s0m35h1t/holbertonschool-machine_learning | 9c0475da64eea87565dd90d70eeb23016ef17600 | 2eb7965900fd018f4092d2fb1e2055d35ba4899e | refs/heads/master | 2023-02-28T21:57:50.558030 | 2021-02-07T20:44:01 | 2021-02-07T20:44:01 | 279,866,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | #!/usr/bin/env python3
"""Array operations"""
def np_elementwise(mat1, mat2):
    """Perform element-wise +, -, * and / between two matrices.

    Args:
        mat1 (numpy.ndarray): first operand.
        mat2 (numpy.ndarray): second operand.
    Returns:
        tuple: (mat1 + mat2, mat1 - mat2, mat1 * mat2, mat1 / mat2)
    """
    total = mat1 + mat2
    difference = mat1 - mat2
    product = mat1 * mat2
    quotient = mat1 / mat2
    return total, difference, product, quotient
| [
"adib.grouz@gmail.com"
] | adib.grouz@gmail.com |
4d3e7de00e12814dd0c68c0dde2ce72c7722b95b | 6aee7149a16a71389e0916de1854f4edea026c2b | /test/download.py | 85081f5885c05233a84be7bdecbb5551a69b5b5b | [
"BSD-2-Clause"
] | permissive | orionzhou/maize | d5e3c66af285d5d3a490fe09e85f840bd033240a | 605c895c397c9f614955a6df8eed0edc553f543d | refs/heads/main | 2022-12-27T02:08:26.747564 | 2022-11-24T07:57:30 | 2022-11-24T07:57:30 | 11,537,821 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,657 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import os.path as op
import wget
# Python 2/3 compatibility shims.
if sys.version_info[0] < 3:
    int_types = (int, long)
    # NOTE(review): `urllib` is never imported in this module, so this Py2
    # branch would raise NameError -- confirm whether Py2 is still supported.
    urlopen = urllib.urlopen
else:
    int_types = (int,)
    basestring = str
    from urllib.request import urlopen
def download_file(src_ftp, dst_file, prt=sys.stdout, loading_bar=True):
    """Download specified file if necessary."""
    # Skip the download entirely when the destination already exists.
    if os.path.isfile(dst_file):
        return
    # If the remote file is gzipped but the caller wants an uncompressed name,
    # download to "<dst>.gz" first and gunzip afterwards.
    do_gunzip = src_ftp[-3:] == '.gz' and dst_file[-3:] != '.gz'
    dst_wget = "{DST}.gz".format(DST=dst_file) if do_gunzip else dst_file
    # Write to stderr, not stdout so this message will be seen when running nosetests
    wget_msg = "wget.download({SRC} out={DST})\n".format(SRC=src_ftp, DST=dst_wget)
    sys.stderr.write(" {WGET}".format(WGET=wget_msg))
    if loading_bar:
        # `loading_bar` doubles as wget's progress-bar callback when truthy.
        loading_bar = wget.bar_adaptive
    try:
        wget.download(src_ftp, out=dst_wget, bar=loading_bar)
        if do_gunzip:
            if prt is not None:
                prt.write(" gunzip {FILE}\n".format(FILE=dst_wget))
            gzip_open_to(dst_wget, dst_file)
    except IOError as errmsg:
        import traceback
        traceback.print_exc()
        sys.stderr.write("**FATAL cmd: {WGET}".format(WGET=wget_msg))
        sys.stderr.write("**FATAL msg: {ERR}".format(ERR=str(errmsg)))
        # Abort the whole program on download failure.
        sys.exit(1)
def gzip_open_to(fin_gz, fout):
    """Unzip `fin_gz` into `fout`, then delete the original .gz file.

    Raises AssertionError if the output file was not created.
    """
    import gzip  # fix: `gzip` was used here but never imported in this module
    with gzip.open(fin_gz, 'rb') as zstrm:
        with open(fout, 'wb') as ostrm:
            ostrm.write(zstrm.read())
    assert os.path.isfile(fout), "COULD NOT GUNZIP({G}) TO FILE({F})".format(G=fin_gz, F=fout)
    os.remove(fin_gz)
| [
"zhoupenggeni@gmail.com"
] | zhoupenggeni@gmail.com |
b99beec62cc60a55e6c46768b861059a9b5a6843 | 147648c6b25ecc33e82a36b36de6623df9340e62 | /examples/hacker_news_assets/hacker_news_assets_tests/test_sensors/test_slack_on_pipeline_failure_sensor.py | 285174018e5397b10ae037b9f992ed283ee06718 | [
"Apache-2.0"
] | permissive | asdlei99/dagster | be81009ff00dbad02f7cec974650388a5cc2af59 | bbfd1a22e85a10881d7dbbcc888957a487f0c3e5 | refs/heads/master | 2023-08-28T07:18:23.838943 | 2021-11-08T23:09:07 | 2021-11-08T23:09:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | from dagster import repository
from hacker_news_assets.sensors.slack_on_pipeline_failure_sensor import make_pipeline_failure_sensor
def test_slack_on_pipeline_failure_def():
    # Smoke test: for each of three base URLs, a repository built from
    # make_pipeline_failure_sensor must expose a sensor definition named
    # "slack_on_pipeline_failure".
    @repository
    def my_repo_local():
        return [
            make_pipeline_failure_sensor("localhost"),
        ]

    @repository
    def my_repo_staging():
        return [
            make_pipeline_failure_sensor("https://dev.something.com"),
        ]

    @repository
    def my_repo_prod():
        return [
            make_pipeline_failure_sensor("https://prod.something.com"),
        ]

    assert my_repo_local.has_sensor_def("slack_on_pipeline_failure")
    assert my_repo_staging.has_sensor_def("slack_on_pipeline_failure")
    assert my_repo_prod.has_sensor_def("slack_on_pipeline_failure")
| [
"noreply@github.com"
] | asdlei99.noreply@github.com |
c4c5dc4c7c52ced3d2aac3855ae1ca3b733e6aec | 130a98632d2ab4c171503b79e455b7aa27a1dda4 | /models/research/delf/delf/python/feature_extractor.py | 9545337f18724520e260af4e36ffa6ee35bce4c6 | [
"MIT",
"Apache-2.0"
] | permissive | aboerzel/German_License_Plate_Recognition | d7fc0314295f5cf0c9d7ae9c93a795e3ef1c5787 | 6fc53292b1d3ce3c0340ce724c2c11c77e663d27 | refs/heads/master | 2023-01-30T18:08:37.339542 | 2023-01-07T07:41:36 | 2023-01-07T07:41:36 | 245,586,430 | 34 | 12 | MIT | 2023-01-07T07:41:37 | 2020-03-07T07:16:51 | Python | UTF-8 | Python | false | false | 6,181 | py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DELF feature extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def NormalizePixelValues(image,
                         pixel_value_offset=128.0,
                         pixel_value_scale=128.0):
  """Normalize image pixel values into a float range.

  Args:
    image: a uint8 tensor.
    pixel_value_offset: a Python float, offset for normalizing pixel values.
    pixel_value_scale: a Python float, scale for normalizing pixel values.

  Returns:
    A float32 tensor of the same shape as the input image.
  """
  float_image = tf.cast(image, dtype=tf.float32)
  return tf.truediv(
      tf.subtract(float_image, pixel_value_offset), pixel_value_scale)
def CalculateReceptiveBoxes(height, width, rf, stride, padding):
  """Calculate receptive boxes for each feature point.

  Args:
    height: The height of feature map.
    width: The width of feature map.
    rf: The receptive field size.
    stride: The effective stride between two adjacent feature points.
    padding: The effective padding size.

  Returns:
    rf_boxes: [N, 4] receptive boxes tensor. Here N equals to height x width.
    Each box is represented by [ymin, xmin, ymax, xmax].
  """
  # One (y, x) coordinate per feature-map cell, flattened to [N, 2].
  x, y = tf.meshgrid(tf.range(width), tf.range(height))
  coordinates = tf.reshape(tf.stack([y, x], axis=2), [-1, 2])
  # [y,x,y,x]
  point_boxes = tf.cast(
      tf.concat([coordinates, coordinates], 1), dtype=tf.float32)
  # Each box spans [-padding, -padding + rf - 1] around its feature point in
  # image coordinates, after scaling the point by the effective stride.
  bias = [-padding, -padding, -padding + rf - 1, -padding + rf - 1]
  rf_boxes = stride * point_boxes + bias
  return rf_boxes
def CalculateKeypointCenters(boxes):
  """Helper function to compute feature centers, from RF boxes.

  Args:
    boxes: [N, 4] float tensor.

  Returns:
    centers: [N, 2] float tensor.
  """
  # Center of each [ymin, xmin, ymax, xmax] box is the midpoint of its corners.
  min_corners = tf.gather(boxes, [0, 1], axis=1)
  max_corners = tf.gather(boxes, [2, 3], axis=1)
  return tf.divide(tf.add(min_corners, max_corners), 2.0)
def ApplyPcaAndWhitening(data,
                         pca_matrix,
                         pca_mean,
                         output_dim,
                         use_whitening=False,
                         pca_variances=None):
  """Applies PCA/whitening to data.

  Args:
    data: [N, dim] float tensor containing data which undergoes PCA/whitening.
    pca_matrix: [dim, dim] float tensor PCA matrix, row-major.
    pca_mean: [dim] float tensor, mean to subtract before projection.
    output_dim: Number of dimensions to use in output data, of type int.
    use_whitening: Whether whitening is to be used.
    pca_variances: [dim] float tensor containing PCA variances. Only used if
      use_whitening is True.

  Returns:
    output: [N, output_dim] float tensor with output of PCA/whitening operation.
  """
  # Center the data, then project onto the first `output_dim` PCA rows
  # (tf.slice keeps rows [0, output_dim) and all columns; transpose_b makes
  # the matmul a projection onto those rows).
  output = tf.matmul(
      tf.subtract(data, pca_mean),
      tf.slice(pca_matrix, [0, 0], [output_dim, -1]),
      transpose_b=True,
      name='pca_matmul')

  # Apply whitening if desired.
  if use_whitening:
    # Divide each output dimension by the sqrt of its PCA variance.
    output = tf.divide(
        output,
        tf.sqrt(tf.slice(pca_variances, [0], [output_dim])),
        name='whitening')

  return output
def PostProcessDescriptors(descriptors, use_pca, pca_parameters=None):
  """Post-process descriptors.

  Args:
    descriptors: [N, input_dim] float tensor.
    use_pca: Whether to use PCA.
    pca_parameters: Only used if `use_pca` is True. Dict containing PCA
      parameter tensors, with keys 'mean', 'matrix', 'dim', 'use_whitening',
      'variances'.

  Returns:
    final_descriptors: [N, output_dim] float tensor with descriptors after
      normalization and (possibly) PCA/whitening.
  """
  # L2-normalize, and if desired apply PCA (followed by L2-normalization).
  final_descriptors = tf.nn.l2_normalize(
      descriptors, axis=1, name='l2_normalization')

  if use_pca:
    # Apply PCA, and whitening if desired.
    final_descriptors = ApplyPcaAndWhitening(final_descriptors,
                                             pca_parameters['matrix'],
                                             pca_parameters['mean'],
                                             pca_parameters['dim'],
                                             pca_parameters['use_whitening'],
                                             pca_parameters['variances'])

    # Re-normalize: the PCA projection changes vector norms.
    final_descriptors = tf.nn.l2_normalize(
        final_descriptors, axis=1, name='pca_l2_normalization')

  return final_descriptors
def DelfFeaturePostProcessing(boxes, descriptors, use_pca, pca_parameters=None):
  """Compute DELF keypoint locations and post-processed descriptors.

  Args:
    boxes: [N, 4] float tensor which denotes the selected receptive box. N is
      the number of final feature points which pass through keypoint selection
      and NMS steps.
    descriptors: [N, input_dim] float tensor.
    use_pca: Whether to use PCA.
    pca_parameters: Only used if `use_pca` is True. Dict containing PCA
      parameter tensors, with keys 'mean', 'matrix', 'dim', 'use_whitening',
      'variances'.

  Returns:
    locations: [N, 2] float tensor which denotes the selected keypoint
      locations.
    final_descriptors: [N, output_dim] float tensor with DELF descriptors after
      normalization and (possibly) PCA/whitening.
  """
  # Keypoint locations are the centers of the receptive boxes.
  keypoint_locations = CalculateKeypointCenters(boxes)
  processed_descriptors = PostProcessDescriptors(descriptors, use_pca,
                                                 pca_parameters)
  return keypoint_locations, processed_descriptors
| [
"andreas.boerzel@gmx.de"
] | andreas.boerzel@gmx.de |
c49aed0557e05e299c3be8527df443644b2e2241 | a8a2491a21ee53066f42ed3cd8c1d5169858790b | /pizzaria/pizzaria/entrega/fixtures/db-update.py | 6bfbb3d7608012395bcf9d9712af49c49baaeb69 | [] | no_license | huogerac/acpy_pizzaria | 80c7236f7c9d8bf58bcf4a9f45e2bfeacd8e4d38 | eec5f88478a424ed3b193c0a6ed4f31d88b1d0e5 | refs/heads/master | 2016-09-05T18:07:14.555495 | 2012-04-13T02:04:53 | 2012-04-13T02:04:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,371 | py | #!/usr/bin/env python
# coding: utf-8
import json
import argparse
import os
"""
atualiza estrutura de um arquivo json
>>> json = [{'pk': 1, 'model': 'entrega.cliente', 'fields': { 'nome': 'Juca' } }]
>>> updater = JsonUpdate( json )
>>> updater.json()
[{'pk': 1, 'model': 'entrega.cliente', 'fields': {'nome': 'Juca'}}]
>>> updater.addNewField(('ramal', ''))
>>> updater.updateJson()
>>> updater.json()
[{'pk': 1, 'model': 'entrega.cliente', 'fields': {'ramal': '', 'nome': 'Juca'}}]
>>> updater.addNewField(('logradouro', ''))
>>> updater.addNewField(('numero', 0))
>>> updater.addNewField(('complemento', ''))
>>> updater.addNewField(('obs', ''))
>>> updater.updateJson()
>>> updater.json()
[{'pk': 1, 'model': 'entrega.cliente', 'fields': {'ramal': '', 'complemento': '', 'nome': 'Juca', 'logradouro': '', 'numero': 0, 'obs': ''}}]
>>> updater.save_newfile('clientes_new.json')
"""
class DbUpdate(object):
    """Add new default fields to every record of a fixture json file."""

    def __init__(self, filename):
        self.filename = filename
        self.clientes = []
        # fix: `fields` was never initialized, so addFields() raised
        # AttributeError before any field could be queued.
        self.fields = []

    def read_file(self):
        """Load the fixture json (path resolved relative to this module)."""
        with open(os.path.join(os.path.dirname(__file__), self.filename)) as f:
            self.clientes = json.load(f)

    def addFields(self, newfield):
        """Queue a (name, default_value) pair to add to every record."""
        self.fields.append(newfield)

    def update_content(self):
        """Apply every queued field to each record's "fields" dict."""
        for cliente in self.clientes:
            fields = cliente["fields"]
            for newfield, value in self.fields:
                fields[newfield] = value

    def show_content(self):
        """Print the current records (debug helper)."""
        print(self.clientes)

    def save_newfile(self, newfile):
        """Dump the (updated) records to `newfile` as indented json."""
        newcontent = json.dumps(self.clientes, sort_keys=False, indent=4)
        # fix: use a context manager; the original called `jsonfile.close`
        # without parentheses, so the handle was never actually closed.
        with open(newfile, "w") as jsonfile:
            jsonfile.write(newcontent)
class JsonUpdate(object):
    """Apply a set of new default fields to every record of a fixture json."""

    def __init__(self, json):
        self._json = json
        self._fields = []

    def addNewField(self, newfield):
        """Queue one (name, default_value) pair."""
        self._fields.append(newfield)

    def fields(self, list_fields):
        """Replace the whole queue of pending fields."""
        self._fields = list_fields

    def updateJson(self):
        """Write every queued field into each record's "fields" dict."""
        for record in self._json:
            target = record["fields"]
            for field_name, default in self._fields:
                target[field_name] = default

    def json(self):
        """Return the (possibly updated) records."""
        return self._json
def update_json(filename, fields_filename):
    """Load the fixture json plus the new-fields json and print both.

    Paths are resolved relative to this module's directory; absolute paths
    are passed through unchanged by os.path.join.

    NOTE(review): this function only prints -- the actual merge of the new
    fields into the fixture is still missing.
    """
    # fix: the original used Python-2-only `print` statements, which are a
    # SyntaxError on Python 3; the parenthesized form below works on both.
    print('-' * 60)
    print('atualizando ', filename)
    print('atualizando ', fields_filename)

    base_dir = os.path.dirname(__file__)
    with open(os.path.join(base_dir, filename)) as content_file:
        json_content = json.load(content_file)
    with open(os.path.join(base_dir, fields_filename)) as fields_file:
        json_fields = json.load(fields_file)

    print(json_content)
    print('------------')
    print(json_fields)
# --- CLI entry point -------------------------------------------------------
# NOTE(review): this runs at import time; consider guarding it with
# `if __name__ == '__main__':`.
parser = argparse.ArgumentParser(description='json update')
parser.add_argument('filename', help='file name ex: customer.json')
parser.add_argument('fields', help='file name with new fields content like: ["campo1": "valor", "campo2": "valor"]')
args = parser.parse_args()
update_json(args.filename, args.fields)
convert a dict to a list of [key, value] pairs:
dict = {}
dict['Capital']="London"
dict['Food']="Fish&Chips"
dict['2012']="Olympics"
#lists
temp = []
dictList = []
#My attempt:
for key, value in dict.iteritems():
    temp = [key, value]
    dictList.append(temp)
| [
"huogerac@gmail.com"
] | huogerac@gmail.com |
9aa34dbf85f69d40cc103e7934a8361b0e268c80 | 39e1e256acae3fe9be4434024d42b9bb47bdd02f | /analysis/submissions/34b1ef17e6625ba2350f6f1c169591a1_task7-2_1595553510/task7-2/main.py | 57e71b6a83e9ecb3955d6e56748da540dc53cbbf | [] | no_license | neulab/tranx-study | 9fb67b9a2181f0b362e4f97316c502eee4539b19 | e2a7089689f7f95e773e19c8f19513abe4fb8b9b | refs/heads/master | 2023-06-14T04:46:01.010892 | 2021-07-08T09:29:05 | 2021-07-08T09:29:05 | 250,357,553 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,872 | py | # Example code, write your program here
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
def autolabel(rects, i):
    # Annotate each bar in `rects` on subplot axs[i] with its height,
    # formatted to two decimals, just above the bar top.
    for bar in rects:
        height = bar.get_height()
        axs[i].text(bar.get_x() + bar.get_width() / 2., height + 1,
                    '%.2f' % height, ha='center', va='bottom')
df = pd.read_csv("StudentsPerformance.csv")

fig, axs = plt.subplots(1, 3, figsize=(20, 6))
ind = np.arange(5)  # one slot per race/ethnicity group A..E
width = 0.35

male = df.loc[df['gender'] == 'male'].groupby("race/ethnicity")
female = df.loc[df['gender'] == 'female'].groupby("race/ethnicity")

# fix: the three subplots were verbatim copy-pasted blocks differing only in
# the score column and title; fold them into one loop.
for i, (column, title) in enumerate([('math score', 'Math'),
                                     ('reading score', 'Reading'),
                                     ('writing score', 'Writing')]):
    rects1 = axs[i].bar(ind - width/2, male[column].mean(), width, label='Male')
    rects2 = axs[i].bar(ind + width/2, female[column].mean(), width, label='Female')
    axs[i].set_xlabel('Race/Ethnicity')
    axs[i].set_ylabel('Average Scores')
    axs[i].set_title(title)
    axs[i].set_xticks(ind)
    axs[i].set_xticklabels(('A', 'B', 'C', 'D', 'E'))
    axs[i].legend()
    autolabel(rects1, i)
    autolabel(rects2, i)

fig.suptitle("Scores by race/ethnicity and gender")
plt.savefig("output/grouped_scores.png")
| [
"frankxu2004@gmail.com"
] | frankxu2004@gmail.com |
496b5d2ed0eedb526dbcbefe24d68668eb530bb8 | 757132ddc333fdabf4b183ac59a232228f9d3f44 | /dict_os_play.py | a9fbc765161e07770a85ba4a0d51353d5fd57eca | [] | no_license | rjcmarkelz/RNAseq_rename_script | d9f6ebebbf80c6b36dd554d2bc5a7e4d731c586e | ebb40f61cbdcefc321aae9f7b2f4ffa489e03705 | refs/heads/master | 2021-01-19T22:13:50.149084 | 2013-04-27T05:57:02 | 2013-04-27T05:57:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | #!/usr/bin/python
import os
import re
# Mapping from original RIL sample names to their renamed (.rn) form.
RN_Dict = {
    'RIL_360': 'RIL_1.rn',
    'RIL_73': 'RIL_1.rn',
    'RIL_259': 'RIL_103.rn',
    'RIL_251': 'RIL_104.rn',
    'RIL_113': 'RIL_113.rn',
    'RIL_265': 'RIL_113.rn',
}
# fix: Python-2-only `print` statements replaced with the parenthesized form.
print(RN_Dict)

# Open a file
# cody_2 path
# path = "/Users/Cody_2/git.repos/RILS/Block1/project.maloof/"
# cody_1 path
# fix: the path string was missing its quotes (SyntaxError).
path = "/Users/Cody/Documents/Maloof Lab/My Brassica/Block2/project.maloof/"
pathfiles = os.listdir(path)

# fix: renamed the loop variable `file` so it no longer shadows the builtin.
for filename in pathfiles:
    source_name = filename
    for key in RN_Dict:
        # str.replace is a no-op when `key` is absent, so every key prints a
        # candidate link name even without a match (original behavior kept).
        link_name = filename.replace(key, RN_Dict[key])
        print(link_name)
        # os.symlink(source_name, link_name)
| [
"rjmarkelz@ucdavis.edu"
] | rjmarkelz@ucdavis.edu |
be3a8630c86bf4562819caacf591832785f84592 | fa93e53a9eee6cb476b8998d62067fce2fbcea13 | /devel/.private/pal_navigation_msgs/lib/python2.7/dist-packages/pal_navigation_msgs/msg/__init__.py | d75a255b3ed14cbd6bca1f6eef3c4d8666bc8d6a | [] | no_license | oyetripathi/ROS_conclusion_project | 2947ee2f575ddf05480dabc69cf8af3c2df53f73 | 01e71350437d57d8112b6cec298f89fc8291fb5f | refs/heads/master | 2023-06-30T00:38:29.711137 | 2021-08-05T09:17:54 | 2021-08-05T09:17:54 | 392,716,311 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | from ._AvailableMaps import *
from ._Emergency import *
from ._EulerAngles import *
from ._EulerAnglesStamped import *
from ._GoToAction import *
from ._GoToActionFeedback import *
from ._GoToActionGoal import *
from ._GoToActionResult import *
from ._GoToFeedback import *
from ._GoToGoal import *
from ._GoToPOIAction import *
from ._GoToPOIActionFeedback import *
from ._GoToPOIActionGoal import *
from ._GoToPOIActionResult import *
from ._GoToPOIFeedback import *
from ._GoToPOIGoal import *
from ._GoToPOIResult import *
from ._GoToResult import *
from ._Highways import *
from ._JoyPriorityAction import *
from ._JoyPriorityActionFeedback import *
from ._JoyPriorityActionGoal import *
from ._JoyPriorityActionResult import *
from ._JoyPriorityFeedback import *
from ._JoyPriorityGoal import *
from ._JoyPriorityResult import *
from ._JoyTurboAction import *
from ._JoyTurboActionFeedback import *
from ._JoyTurboActionGoal import *
from ._JoyTurboActionResult import *
from ._JoyTurboFeedback import *
from ._JoyTurboGoal import *
from ._JoyTurboResult import *
from ._LaserImage import *
from ._MapConfiguration import *
from ._NavigationStatus import *
from ._NiceMapTransformation import *
from ._POI import *
from ._PolarReading import *
from ._PolarReadingScan import *
from ._ServiceStatus import *
from ._TabletPOI import *
from ._VisualLocDB import *
from ._VisualTrainingAction import *
from ._VisualTrainingActionFeedback import *
from ._VisualTrainingActionGoal import *
from ._VisualTrainingActionResult import *
from ._VisualTrainingFeedback import *
from ._VisualTrainingGoal import *
from ._VisualTrainingResult import *
| [
"sandeepan.ghosh.ece20@itbhu.ac.in"
] | sandeepan.ghosh.ece20@itbhu.ac.in |
9da351c36861948347a97f559f18e3c7d0507905 | 3292017df3ff6c7190d5c5a60ecf5f8936cb7b90 | /checkio/Elementary/Index Power/index_power.py | e293ee43d262aad903dab773ab170bca99681aeb | [
"MIT"
] | permissive | KenMercusLai/checkio | 1e9cdfe70ccaf5315db36391c4710533d99cf9aa | 5082ab0c6a7ae2d97963568a6f41589332e88029 | refs/heads/master | 2022-05-12T18:22:22.604531 | 2022-05-11T09:00:28 | 2022-05-11T09:00:28 | 22,260,056 | 39 | 22 | NOASSERTION | 2022-05-11T08:42:05 | 2014-07-25T14:40:06 | Python | UTF-8 | Python | false | false | 528 | py | def index_power(array, n):
# Find Nth power of the element with index N.
if n > len(array) - 1:
return -1
else:
return array[n] ** n
if __name__ == '__main__': # pragma: no cover
# These "asserts" using only for self-checking and not necessary for
# auto-testing
assert index_power([1, 2, 3, 4], 2) == 9, "Square"
assert index_power([1, 3, 10, 100], 3) == 1_000_000, "Cube"
assert index_power([0, 1], 0) == 1, "Zero power"
assert index_power([1, 2], 3) == -1, "IndexError"
| [
"ken.mercus.lai@gmail.com"
] | ken.mercus.lai@gmail.com |
93f43ffd58185a4000535c395d163b5c91f6ebdd | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/1330/codes/1595_1446.py | ae1ff47705f5df6b8f4503beaa1b154e0044a419 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | # Teste seu codigo aos poucos.
# Nao teste tudo no final, pois fica mais dificil de identificar erros.
# Nao se intimide com as mensagens de erro. Elas ajudam a corrigir seu codigo.
q = float(input())
f = q/3
print(round(f, 3)) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
a7062d038bf0fc26a95efe6d963427056ee70d87 | cd18e8900018afb643c44286a8d4db5d8d1c87c1 | /likelihood.py | 6bfaac5b749d06bd419e94daf5f6ed584a395940 | [] | no_license | pbizimis/score_sde_pytorch | 064a0bf9b2f116b2f0a2e9c7e129a8c38f7aa0b6 | cb0e019fc7f1262724877730d64f75eb16aab1b0 | refs/heads/main | 2023-04-18T21:15:31.753968 | 2021-03-21T22:48:36 | 2021-03-21T22:48:36 | 349,578,514 | 1 | 0 | null | 2021-03-19T23:18:21 | 2021-03-19T23:18:20 | null | UTF-8 | Python | false | false | 4,713 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
# pytype: skip-file
"""Various sampling methods."""
import torch
import numpy as np
from scipy import integrate
from models import utils as mutils
def get_div_fn(fn):
  """Build an unbiased divergence estimator of `fn` via the
  Hutchinson-Skilling trace estimator."""

  def estimate_div(x, t, eps):
    with torch.enable_grad():
      x.requires_grad_(True)
      # Project fn's output onto the probe `eps` and differentiate w.r.t. x.
      projected = torch.sum(fn(x, t) * eps)
      grad_projected = torch.autograd.grad(projected, x)[0]
    x.requires_grad_(False)
    # Reduce over all non-batch dimensions to get one estimate per sample.
    non_batch_dims = tuple(range(1, x.dim()))
    return torch.sum(grad_projected * eps, dim=non_batch_dims)

  return estimate_div
def get_likelihood_fn(sde, inverse_scaler, hutchinson_type='Rademacher',
                      rtol=1e-5, atol=1e-5, method='RK45', eps=1e-5):
  """Create a function to compute the unbiased log-likelihood estimate of a given data point.

  Args:
    sde: A `sde_lib.SDE` object that represents the forward SDE.
    inverse_scaler: The inverse data normalizer.
    hutchinson_type: "Rademacher" or "Gaussian". The type of noise for Hutchinson-Skilling trace estimator.
    rtol: A `float` number. The relative tolerance level of the black-box ODE solver.
    atol: A `float` number. The absolute tolerance level of the black-box ODE solver.
    method: A `str`. The algorithm for the black-box ODE solver.
      See documentation for `scipy.integrate.solve_ivp`.
    eps: A `float` number. The probability flow ODE is integrated to `eps` for numerical stability.

  Returns:
    A function that a batch of data points and returns the log-likelihoods in bits/dim,
      the latent code, and the number of function evaluations cost by computation.
  """

  def drift_fn(model, x, t):
    """The drift function of the reverse-time SDE."""
    score_fn = mutils.get_score_fn(sde, model, train=False, continuous=True)
    # Probability flow ODE is a special case of Reverse SDE
    rsde = sde.reverse(score_fn, probability_flow=True)
    return rsde.sde(x, t)[0]

  def div_fn(model, x, t, noise):
    # Hutchinson-Skilling divergence estimate of the drift, probed with `noise`.
    return get_div_fn(lambda xx, tt: drift_fn(model, xx, tt))(x, t, noise)

  def likelihood_fn(model, data):
    """Compute an unbiased estimate to the log-likelihood in bits/dim.

    Args:
      model: A score model.
      data: A PyTorch tensor.

    Returns:
      bpd: A PyTorch tensor of shape [batch size]. The log-likelihoods on `data` in bits/dim.
      z: A PyTorch tensor of the same shape as `data`. The latent representation of `data` under the
        probability flow ODE.
      nfe: An integer. The number of function evaluations used for running the black-box ODE solver.
    """
    with torch.no_grad():
      shape = data.shape
      # Draw the fixed Hutchinson probe used throughout the integration.
      if hutchinson_type == 'Gaussian':
        epsilon = torch.randn_like(data)
      elif hutchinson_type == 'Rademacher':
        epsilon = torch.randint_like(data, low=0, high=2).float() * 2 - 1.
      else:
        raise NotImplementedError(f"Hutchinson type {hutchinson_type} unknown.")

      def ode_func(t, x):
        # The flattened ODE state is [flattened data, per-sample logp deltas];
        # the last shape[0] entries are the log-prob accumulator.
        sample = mutils.from_flattened_numpy(x[:-shape[0]], shape).to(data.device).type(torch.float32)
        vec_t = torch.ones(sample.shape[0], device=sample.device) * t
        drift = mutils.to_flattened_numpy(drift_fn(model, sample, vec_t))
        logp_grad = mutils.to_flattened_numpy(div_fn(model, sample, vec_t, epsilon))
        return np.concatenate([drift, logp_grad], axis=0)

      # Integrate the probability flow ODE from t=eps to t=T.
      init = np.concatenate([mutils.to_flattened_numpy(data), np.zeros((shape[0],))], axis=0)
      solution = integrate.solve_ivp(ode_func, (eps, sde.T), init, rtol=rtol, atol=atol, method=method)
      nfe = solution.nfev
      zp = solution.y[:, -1]
      # Split the final state back into latent code and accumulated logp delta.
      z = mutils.from_flattened_numpy(zp[:-shape[0]], shape).to(data.device).type(torch.float32)
      delta_logp = mutils.from_flattened_numpy(zp[-shape[0]:], (shape[0],)).to(data.device).type(torch.float32)
      prior_logp = sde.prior_logp(z)
      # Convert nats to bits and normalize by the number of dimensions.
      bpd = -(prior_logp + delta_logp) / np.log(2)
      N = np.prod(shape[1:])
      bpd = bpd / N
      # A hack to convert log-likelihoods to bits/dim
      offset = 7. - inverse_scaler(-1.)
      bpd = bpd + offset
      return bpd, z, nfe

  return likelihood_fn
| [
"yang-song@live.cn"
] | yang-song@live.cn |
2e21c92d54845d183d2aaebd0eee1a77a39208fc | f6bba50fccc6fb0dae2f046193434cfb4b9d32d5 | /m-solutions2020/c/main.py | 0bc2f50f93f9895ce3d48877d517d39eb5471a3e | [] | no_license | seven320/AtCoder | 4c26723d20004fe46ce118b882faabc05066841c | 45e301e330e817f1ace4be4088d3babe18588170 | refs/heads/master | 2021-11-22T22:57:32.290504 | 2021-10-24T09:15:12 | 2021-10-24T09:15:12 | 162,827,473 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | #!/usr/bin/env python3
# encoding:utf-8
import copy
import random
import bisect #bisect_left これで二部探索の大小検索が行える
import fractions #最小公倍数などはこっち
import math
import sys
import collections
from decimal import Decimal # 10進数で考慮できる
mod = 10**9+7  # Common competitive-programming modulus (10^9 + 7).
sys.setrecursionlimit(mod)  # Raise the recursion limit (CPython default is 1000).
d = collections.deque()  # Scratch deque from the contest template (unused below).
def LI():
    """Read one whitespace-separated line from stdin as a list of ints."""
    return [int(token) for token in sys.stdin.readline().split()]
# N, K = LI()
# A = LI()
# for i in range(N - K):
# if A[i] < A[i + K]:
# ans = "Yes"
# else:
# ans = "No"
# print(ans)
# Read N, K and the sequence A; for each i in [0, N-K), report whether
# A[i + K] is strictly greater than A[i] (one "Yes"/"No" line per comparison).
# Fix: the original final line had a file-dump artifact ("| [") fused onto it
# and the block's indentation had been lost.
N, K = LI()
A = LI()
for i in range(N - K):
    if A[i] < A[i + K]:
        ans = "Yes"
    else:
        ans = "No"
    print(ans)
"yosyuaomenw@yahoo.co.jp"
] | yosyuaomenw@yahoo.co.jp |
01b21899501be1f686675a277d7c04a97b49c2a6 | 9c019fe81794fdecd027c9f3b6d57fb95dc646e7 | /waters/dev/pep3d_writer2.py | 6c67eadbcf283419d104844264604eb0845ffb4d | [] | no_license | MatteoLacki/waters | e29c3def911d494df5240ead114239082181f149 | 767d7d45fbe62acf0af75bbefc9bdeb4f65889e6 | refs/heads/master | 2021-07-07T11:47:25.758922 | 2020-09-20T11:24:36 | 2020-09-20T11:24:36 | 192,311,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | %load_ext autoreload
%autoreload 2
from pathlib import Path
import pandas as pd
# Widen pandas console output for interactive inspection of parsed tables.
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 5)
import numpy as np
import numpy as np
from platform import system
from waters.parsers import XMLparser, iaDBsXMLparser, Pep3Dparser, Apex3Dparser, df2text, col2format
# Pick the raw-data folder for the current platform (IPython dev script).
if system() == 'Linux':
    data_f = Path('~/Projects/WatersData/O190303_78').expanduser()
    # NOTE: the line below immediately overrides the path above — presumably a
    # leftover from switching datasets; confirm which dataset is intended.
    data_f = Path('/home/matteo/Projects/WatersData/O200114_03').expanduser()
else:
    data_f = Path(r"Y:\TESTRES2\5P\S170317_04__v1")

# Parse the Pep3D spectrum XML and bump ADCResponse in both energy channels.
pep3d = next(data_f.glob('*_Pep3D_Spectrum.xml'))
P3D = Pep3Dparser(pep3d)
P3D.get_all_tag_counts()
le = P3D.LE
he = P3D.HE
le['ADCResponse'] = 10000
P3D.LE = le
he['ADCResponse'] = 10000
P3D.HE = he
P3D.write(pep3d.parent/(pep3d.stem + "_ADCResponse10000.xml"))
# compare outputs: with check sums:
from syncFiles.syncFiles import check_sum
from waters.parsers import iaDBsXMLparser
ia_workflows = list(data_f.glob('*_IA_Workflow*.xml'))
for iw in ia_workflows:
    print(check_sum(iw))
# check sums do differ: what about the data?
orig = ia_workflows[0]
mod = ia_workflows[1]
parsed = [iaDBsXMLparser(i) for i in ia_workflows]
prots = [i.proteins() for i in parsed]
prods = [i.products() for i in parsed]
| [
"matteo.lacki@gmail.com"
] | matteo.lacki@gmail.com |
ce1cbee1c8760bed1f92f6c835a5075202aaff8c | 6f8de50a5b51e938aca5ffe9b33bc312d68b2de0 | /tests/test_sleeping.py | 0014f207b373c6b2f459a14c16295cd8501ed1e6 | [
"MIT"
] | permissive | pawelkopka/kopf | 0621150d9cdf286a2763c1482082d4868293c6c3 | 51a3a70e09a17cf3baec2946b64b125a90595cf4 | refs/heads/master | 2021-02-27T09:42:19.108740 | 2020-02-20T16:05:16 | 2020-02-20T16:05:16 | 245,597,653 | 0 | 0 | MIT | 2020-03-07T08:46:33 | 2020-03-07T08:46:33 | null | UTF-8 | Python | false | false | 1,222 | py | import asyncio
from kopf.engines.sleeping import sleep_or_wait
async def test_sleep_or_wait_by_delay_reached(timer):
    """The full delay elapses when nobody sets the event; nothing is left unslept."""
    wakeup = asyncio.Event()
    with timer:
        unslept = await asyncio.wait_for(sleep_or_wait(0.10, wakeup), timeout=1.0)
    assert 0.10 <= timer.seconds < 0.11
    assert unslept is None
async def test_sleep_or_wait_by_event_set(timer):
    """Setting the event mid-sleep interrupts it and reports the unslept remainder."""
    wakeup = asyncio.Event()
    loop = asyncio.get_running_loop()
    loop.call_later(0.07, wakeup.set)
    with timer:
        unslept = await asyncio.wait_for(sleep_or_wait(0.10, wakeup), timeout=1.0)
    assert 0.06 <= timer.seconds <= 0.08
    assert 0.02 <= unslept <= 0.04
async def test_sleep_or_wait_with_zero_time_and_event_cleared(timer):
    """A zero-delay sleep returns immediately even when the event is not set."""
    wakeup = asyncio.Event()
    wakeup.clear()
    with timer:
        unslept = await asyncio.wait_for(sleep_or_wait(0, wakeup), timeout=1.0)
    assert timer.seconds <= 0.01
    assert unslept is None
async def test_sleep_or_wait_with_zero_time_and_event_preset(timer):
    """With zero delay and a pre-set event, both exit conditions hold at once."""
    wakeup = asyncio.Event()
    wakeup.set()
    with timer:
        unslept = await asyncio.wait_for(sleep_or_wait(0, wakeup), timeout=1.0)
    assert timer.seconds <= 0.01
    # 0 or None are both acceptable: the result is undefined when both goals
    # (delay reached, event set) are satisfied simultaneously.
    assert not unslept
| [
"sergey.vasilyev@zalando.de"
] | sergey.vasilyev@zalando.de |
a0277aef821c0c0df03be709cad993832130f137 | f4b8c90c1349c8740c1805f7b6b0e15eb5db7f41 | /test/test_incident_clery_geography_item.py | a2e42aee295d69416c816a915582b860257990dd | [] | no_license | CalPolyResDev/StarRezAPI | 012fb8351159f96a81352d6c7bfa36cd2d7df13c | b184e1863c37ff4fcf7a05509ad8ea8ba825b367 | refs/heads/master | 2021-01-25T10:29:37.966602 | 2018-03-15T01:01:35 | 2018-03-15T01:01:35 | 123,355,501 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | # coding: utf-8
"""
StarRez API
This is a way to connect with the StarRez API. We are not the developers of the StarRez API, we are just an organization that uses it and wanted a better way to connect to it. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: resdev@calpoly.edu
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import starrez_client
from starrez_client.models.incident_clery_geography_item import IncidentCleryGeographyItem # noqa: E501
from starrez_client.rest import ApiException
class TestIncidentCleryGeographyItem(unittest.TestCase):
    """IncidentCleryGeographyItem unit test stubs"""

    def setUp(self):
        # Auto-generated stub: no fixtures required yet.
        pass

    def tearDown(self):
        # Auto-generated stub: nothing to clean up.
        pass

    def testIncidentCleryGeographyItem(self):
        """Test IncidentCleryGeographyItem"""
        # FIXME: construct object with mandatory attributes with example values
        # model = starrez_client.models.incident_clery_geography_item.IncidentCleryGeographyItem()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| [
"fedorareis@gmail.com"
] | fedorareis@gmail.com |
bb558e12b38a611e6ef8ebb0b193f6cc9978081e | a74216bf9183c9124b900e2c49aed9c2df324fab | /conversion/hinter_merge_gt_poses.py | c37362ae75e4363c7bab06107b23815c702c6e5c | [
"MIT"
] | permissive | EricCousineau-TRI/sixd_toolkit | 1900bef231024ccc1b528202f01c40ac398b96ec | dd610c46ae75d09aa68d21e56505936de05d44bb | refs/heads/master | 2020-06-06T03:13:37.924124 | 2019-06-18T23:01:36 | 2019-06-18T23:01:36 | 192,622,561 | 0 | 0 | MIT | 2019-06-18T22:52:22 | 2019-06-18T22:52:22 | null | UTF-8 | Python | false | false | 1,255 | py | # Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz)
# Center for Machine Perception, Czech Technical University in Prague
# Copies a selected part of a dataset.
import os
import sys
import yaml
# Make the toolkit package importable when the script is run from its folder.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Input/output paths (hard-coded for a specific local dataset layout).
gt_poses_1_path = '/local/datasets/tlod/hinterstoisser/backup/scene_02_separated_gts/scene_gt.yml'
gt_poses_2_path = '/local/datasets/tlod/hinterstoisser/backup/scene_02_separated_gts/scene_gt_brachmann.yml'
gt_poses_out_path = '/local/datasets/tlod/hinterstoisser/test/02/scene_gt.yml'

# Load both ground-truth pose files (CLoader is PyYAML's fast C-based loader).
with open(gt_poses_1_path, 'r') as f:
    gt_poses_1 = yaml.load(f, Loader=yaml.CLoader)
with open(gt_poses_2_path, 'r') as f:
    gt_poses_2 = yaml.load(f, Loader=yaml.CLoader)

# Both files must cover exactly the same image IDs.
assert(sorted(gt_poses_1.keys()) == (sorted(gt_poses_2.keys())))

# Merge the per-image ground-truth lists, ordered by object ID.
gt_poses_out = {}
for im_id in sorted(gt_poses_1.keys()):
    gt_poses_out[im_id] = sorted(gt_poses_1[im_id] + gt_poses_2[im_id], key=lambda x: x['obj_id'])
def float_representer(dumper, value):
    """YAML representer that serializes floats with fixed 8-decimal precision."""
    formatted = '%.8f' % value
    return dumper.represent_scalar(u'tag:yaml.org,2002:float', formatted)
# Register the fixed-precision float representer before dumping.
yaml.add_representer(float, float_representer)
# Store metadata
with open(gt_poses_out_path, 'w') as f:
    yaml.dump(gt_poses_out, f, width=10000)  # wide lines keep each pose on one line
| [
"tom.hodan@gmail.com"
] | tom.hodan@gmail.com |
2ede47e112d7ee53b28b6b8ed26381669043ab46 | dddd89637373f455a476431f4fcb7e17b4e9dd57 | /py/mymodule.py | 7ee8fd4f09f5ecb889c7703003fa5ef98e954cfa | [] | no_license | DhirManish/Python | 35304eb47dea61934426fb6fc5094e1a83517cf3 | 10df7245d0964340d6c8d14cf26a9cf8f93ecf5d | refs/heads/master | 2020-06-05T07:09:41.856780 | 2015-03-07T12:53:10 | 2015-03-07T12:53:10 | 20,372,496 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# mymodule.py
#
# Copyright 2014 Ajay Bhatia <ajay@dumb-box>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
def sayhello():
    """Print a short greeting identifying this module."""
    greeting = "Hi! This is mymodule"
    print(greeting)
__version__ = '0.1'  # Module version string.
| [
"prof.ajaybhatia@gmail.com"
] | prof.ajaybhatia@gmail.com |
a00358f8ab1fa0c2fdc688d9b88603e9d41fb568 | 95a1f698b1d8b7c2578d5306481f506751b0452e | /dino_history/heritage/views.py | d89f40459cc1c9e5951c7eacf0e1b85138f2b91b | [] | no_license | hyesungoh/dino_history | 45130bf8aa984282c90fa2b241401d0c038968e6 | e5f50a8b83ff445c627302af2e6ca893ef8a4af2 | refs/heads/master | 2023-01-07T14:43:28.643160 | 2020-10-26T12:48:01 | 2020-10-26T12:48:01 | 284,934,718 | 0 | 0 | null | 2020-08-24T12:10:33 | 2020-08-04T09:26:13 | Python | UTF-8 | Python | false | false | 4,231 | py | from django.shortcuts import render
from .models import Heritage
from user.models import Student
# from django.contrib.staticfiles.templatetags.staticfiles import static
# from django.contrib.staticfiles.storage import staticfiles_storage
import os
# Create your views here.
def main(request):
    """Landing page: personalised dashboard for signed-in users, plain page otherwise."""
    if not request.user.is_authenticated:
        return render(request, 'heritage/main_nosigned.html')
    user_now = request.user
    dino_url = dino_img(user_now.dino_level, user_now.dino_class)
    rank_dict = return_my_ranking(user_now)
    context = {
        'user_now': user_now,
        'dino_url': dino_url,
        'total': rank_dict['총'],
        'gh': rank_dict['근현대'],
        'chs': rank_dict['조선시대'],
    }
    return render(request, 'heritage/main.html', context)
def return_my_ranking(current_user):
    """Return the 1-based rank of ``current_user`` for each score category."""
    categories = (
        ('총', 'cor_num'),
        ('근현대', 'gh_num'),
        ('조선시대', 'chs_num'),
        ('삼국시대', 'sg_num'),
        ('선사시대', 'ss_num'),
    )
    rank_dict = {}
    for label, field in categories:
        # Rank = position in the descending ordering of that score column.
        ordered = Student.objects.all().order_by('-' + field)
        rank_dict[label] = list(ordered).index(current_user) + 1
    return rank_dict
def map(request):
    # Renders the heritage map page. NOTE: the view name shadows the builtin ``map``.
    return render(request, 'heritage/map.html')
def result(request):
    """Search heritages by name and render up to 7 matches.

    Fix: uses ``GET.get`` with a default so a request without the ``name``
    parameter falls back to the default listing instead of raising a
    MultiValueDictKeyError (HTTP 500); the original indexed
    ``request.GET["name"]`` directly.
    """
    name = request.GET.get("name", "")
    if name:
        # Something was typed in the search box: substring match on the name.
        heritages = Heritage.objects.filter(name__contains=name)[0:7]
        if len(heritages) < 1:
            # No match: show the shared error page.
            msg = str(name) + "이/가 들어간 문화재가 없어용 ㅜ"
            return error(request, msg)
    else:
        # Empty query: just show the first few entries.
        heritages = Heritage.objects.all()[0:7]
    return render(request, 'heritage/result.html', {'name': name, 'heritages': heritages})
def error(request, error_msg):
    # Shared error page; ``error_msg`` is displayed to the user.
    return render(request, 'user/error.html', {'error_msg': error_msg})
def map_result(request):
    """Search heritages by location and render up to 7 matches.

    Fix: ``GET.get`` with a default avoids an HTTP 500 when the ``location``
    query parameter is missing (the original indexed ``request.GET["location"]``).
    """
    location = request.GET.get("location", "")
    if location:
        heritages = Heritage.objects.filter(location__contains=location)[0:7]
    else:
        heritages = Heritage.objects.all()[0:7]
    return render(request, 'heritage/map_result.html', {'location': location, 'heritages': heritages})
def save_heritage(request):
    """Seed the Heritage table from the bundled ``heritage.txt`` dump, then render a test page."""
    # txt_file_url = static('heritage/txt/heritage.txt')
    # txt_file_url = staticfiles_storage.url('heritage/txt/heritage.txt')
    # Record fields: name, province + city, image, description, dynasty, longitude, latitude.
    if len(Heritage.objects.all()) > 10:
        # Table already populated; skip the import.
        pass
    else:
        # module_dir = os.path.dirname(__file__)
        # file_path = os.path.join(module_dir, '/Users/ohyeseong/Documents/django/dino_history/dino_history/heritage/heritage.txt')
        module_dir = os.path.dirname(__file__)
        file_path = os.path.join(module_dir, 'heritage.txt')
        heritage_txt = open(file_path, 'r')
        while True:
            line = heritage_txt.readline()
            if not line: break
            # SECURITY NOTE: eval() executes arbitrary code from the data file;
            # this is only acceptable because heritage.txt ships with the project.
            # Prefer ast.literal_eval (or JSON) if the file could ever be user-supplied.
            this_heritage = eval(line)
            temp_heritage = Heritage()
            temp_heritage.name = this_heritage['문화재명1']
            temp_heritage.location = this_heritage['위치_도'] + this_heritage['위치_시']
            temp_heritage.dynasty = this_heritage['시대']
            temp_heritage.img_url = this_heritage['이미지']
            temp_heritage.content = this_heritage['내용']
            temp_heritage.longitude = this_heritage['경도']
            temp_heritage.latitude = this_heritage['위도']
            temp_heritage.save()
    return render(request, 'heritage/save_test.html')
def dino_img(level, cls):
    """Return the dino image key: the bare level for levels 0/1, else '<level>_<cls>'."""
    if level in (0, 1):
        return level
    return '{}_{}'.format(level, cls)
| [
"haesungoh414@gmail.com"
] | haesungoh414@gmail.com |
f8bbfce20a767ccc811dab546f25a1739002b4c9 | 8d4c6f902880f5f6b792c12a78d09afcb3cdd3d7 | /KafkaProducer.py | 2aea02ccb7d84c882c0fcad279c58b6fb77f3237 | [] | no_license | DataMarvel/DataFury | e1ae14a66253a2b67b60de8babf4e6d3fc08ea2d | 553023a8bb0a5f1f01f32d463563a39425cff21d | refs/heads/master | 2020-04-06T17:30:22.752209 | 2018-11-26T10:18:57 | 2018-11-26T10:18:57 | 157,661,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,550 | py | # -*- coding: UTF-8 -*-
"""
Created on 2018年11月20日
@author: Leo
@file: KafkaProducer
"""
# Python内部库
import json
from collections import OrderedDict
# Python第三方库
import pykafka
# 项目内部库
from DataVision.LoggerHandler.logger import VisionLogger
from DataFury.MessagePipe.KafkaPipe import KafkaMessageClient
# Path to the logger configuration (relative to the project root).
LOGGER_PATH = '../DataVision/LoggerConfig/logger_config.yaml'
# Module-wide logger instance.
logger = VisionLogger(LOGGER_PATH)
class KafkaProducer(object):
    """Thin producer wrapper around a pykafka topic obtained via KafkaMessageClient."""

    def __init__(self,
                 topic_name: str,
                 json_config: bool = False,
                 **kafka_config):
        """Kafka producer.

        :param topic_name: topic name (overridden by the JSON config when json_config is True)
        :param json_config: read the configuration from a JSON file instead
        :param kafka_config: kafka client settings (see KafkaMessageClient)
        """
        if topic_name == "":
            logger.vision_logger(level="ERROR", log_msg="Kafka Topic不能为空!")
            # NOTE: returning here leaves the instance without _client/_topic;
            # any later method call will fail with AttributeError.
            return
        else:
            self._topic_name = topic_name.encode("UTF-8")
        # Build the client wrapper.
        kafka_message_client = KafkaMessageClient(json_config=json_config, **kafka_config)
        # When configured via JSON, the topic name comes from the config file.
        if json_config:
            self._topic_name = kafka_message_client.get_topic().encode("UTF-8")
        # Underlying pykafka client object.
        self._client = kafka_message_client.get_client()
        # Topic handle (None when lookup failed; see _create_topic).
        self._topic = self._create_topic()

    def _create_topic(self) -> pykafka.Topic:
        """Create or fetch the topic object.

        :return: a pykafka Topic instance, or None on failure (error is logged).
        """
        try:
            return self._client.topics[self._topic_name]
        except Exception as err:
            logger.vision_logger(level="ERROR", log_msg=str(err))

    def get_producer(self, producer_type: str = 'sync', **producer_config) -> pykafka.Producer:
        """Create a producer object for the topic.

        :param producer_type: producer flavour, 'common' (async) or 'sync'
        :return: a pykafka Producer, or None on failure (error is logged).
        """
        if self._topic is None:
            logger.vision_logger(level="ERROR", log_msg="创建Producer失败")
        else:
            if producer_type in ['common', 'sync']:
                if producer_type == "common":
                    return self._topic.get_producer(**producer_config)
                elif producer_type == "sync":
                    return self._topic.get_sync_producer(**producer_config)
            else:
                logger.vision_logger(level="ERROR", log_msg="创建Producer失败, Producer类型错误")

    def produce(self, producer: pykafka.Producer, data):
        """Serialize ``data`` to bytes and send it through ``producer``.

        :param producer: the producer to send with
        :param data: dict/OrderedDict (JSON-encoded), str, int or float
        """
        if isinstance(data, (dict, OrderedDict)):
            try:
                data = json.dumps(data).encode("UTF-8")
            except Exception as err:
                logger.vision_logger(level="ERROR", log_msg=str(err))
        elif isinstance(data, str):
            data = data.encode("UTF-8")
        elif isinstance(data, (int, float)):
            # BUGFIX: the original used bytes(data), which produces `data`
            # zero-bytes for an int and raises TypeError for a float.
            # Encode the textual representation instead.
            data = str(data).encode("UTF-8")
        else:
            logger.vision_logger(level="ERROR", log_msg="暂时不支持此类型的数据进行发送!")
        # Send the data; the producer is used as a context manager so it is
        # flushed/stopped on exit.
        with producer as pd:
            pd.produce(data)
if __name__ == '__main__':
    # Ad-hoc smoke test against a hard-coded broker; not intended for production use.
    p = KafkaProducer(topic_name="192.168.30.243", host_port="120.77.209.23:19001")
    p.produce(producer=p.get_producer(), data="123")
| [
"379978424@qq.com"
] | 379978424@qq.com |
a4225989c567f4133e52a90ae410c30dfee20c2e | 1bd2e09ce498c6bbb63e1854471941b777096bdb | /paleomix/tools/zonkey/parts/admixture.py | 8df7bd212c985e38798e8df5f83a3e60dfc8a063 | [] | no_license | muslih14/paleomix | 3f1e6f43178a132b05525b5eb296d8cf1c5d72c4 | 254fb1c4151e03217c19715a6e989dc79b1edcc5 | refs/heads/master | 2021-01-21T08:33:04.995061 | 2016-04-05T14:32:21 | 2016-04-05T14:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,712 | py | #!/usr/bin/python
#
# Copyright (c) 2016 Mikkel Schubert <MSchubert@snm.ku.dk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Parsing and validation of admixture results.
"""
import collections
CUTOFF = 0.001  # Minimum admixture proportion for a sample to count as a member of a population.
class AdmixtureError(RuntimeError):
    """Raised when ADMIXTURE results are malformed or internally inconsistent."""
    pass
def read_admixture_results(filename, data, k_groups, cutoff=CUTOFF):
    """Read ADMIXTURE proportions and map ancestral populations to reference groups.

    Python 2 code (note ``dict.iteritems``).

    filename -- path to the ADMIXTURE proportions file (one line per sample).
    data     -- database object exposing ``sample_order`` and ``samples``.
    k_groups -- number of ancestral populations (K) used in the run.
    cutoff   -- minimum proportion for a sample to count towards a population.

    Returns one ``[reference_group_names, proportion]`` pair per ancestral
    population, where the proportion is taken from the "-" row (presumably the
    focal/query sample appended after the reference panel -- confirm against caller).
    """
    key = "Group(%i)" % (k_groups,)
    # The "-" pseudo-name labels the extra final line of the results file.
    names = tuple(data.sample_order) + ("-",)
    table = _admixture_read_results(filename, names)
    _admixture_validate_ancestral_groups(data, table, k_groups, cutoff)
    # One [set-of-groups, focal-proportion] entry per ancestral population.
    ancestral_groups = [[set(), value] for value in table["-"]]
    for sample, row in table.iteritems():
        if sample == '-':
            continue
        group = data.samples[sample][key]
        for index, value in enumerate(row):
            if value >= cutoff:
                ancestral_groups[index][0].add(group)
    return ancestral_groups
def get_percentiles(data, sample1, sample2, nreads, k_groups, has_ts, value):
    """Bracket ``value`` between simulated percentile ranges at the nearest
    simulated read counts at-or-below and at-or-above ``nreads``."""
    results = {'Sample1': sample1,
               'Sample2': sample2}

    counts_below = set(row['NReads'] for row in data.simulations
                       if row['NReads'] <= nreads)
    counts_above = set(row['NReads'] for row in data.simulations
                       if row['NReads'] >= nreads)

    if counts_below:
        nearest_below = max(counts_below)
        selection = _select_simulations(data=data,
                                        sample1=sample1,
                                        sample2=sample2,
                                        nreads=nearest_below,
                                        k_groups=k_groups,
                                        has_ts=has_ts)
        lower_pct, upper_pct = _get_percentile_range(selection, value)
        results['Lower'] = {'NReads': nearest_below,
                            'Lower': lower_pct,
                            'Upper': upper_pct}

    if counts_above:
        nearest_above = min(counts_above)
        selection = _select_simulations(data=data,
                                        sample1=sample1,
                                        sample2=sample2,
                                        nreads=nearest_above,
                                        k_groups=k_groups,
                                        has_ts=has_ts)
        lower_pct, upper_pct = _get_percentile_range(selection, value)
        results['Upper'] = {'NReads': nearest_above,
                            'Lower': lower_pct,
                            'Upper': upper_pct}

    return results
def _select_simulations(data, sample1, sample2, nreads, k_groups, has_ts):
selection = []
samples = frozenset((sample1, sample2))
for row in data.simulations:
if row['K'] != k_groups or row['HasTS'] != has_ts:
continue
elif row['NReads'] != nreads:
continue
elif frozenset((row['Sample1'], row['Sample2'])) != samples:
continue
selection.append(row)
return selection
def _get_percentile_range(selection, value):
selection = [(row['Percentile'], row['Value'])
for row in selection]
selection.sort()
lower_bound = 0.0
upper_bound = 1.0
for cur_pct, cur_value in selection:
if cur_value > value:
break
lower_bound = cur_pct
for cur_pct, cur_value in reversed(selection):
if cur_value < value:
break
upper_bound = cur_pct
return lower_bound, upper_bound
def _admixture_read_results(filename, samples):
with open(filename) as handle:
lines = handle.readlines()
if len(samples) != len(lines):
raise AdmixtureError("TODO")
result = {}
for name, line in zip(samples, lines):
result[name] = [float(value) for value in line.split()]
return result
def _admixture_validate_ancestral_groups(data, table, k_groups, cutoff):
    """Check that each reference group maps to a single ancestral population.

    Python 2 code (note ``dict.iteritems``). Raises AdmixtureError when the
    members of any reference group are assigned (proportion >= cutoff) to more
    than one ancestral population.
    """
    key = "Group(%i)" % (k_groups,)
    # groups[group][population_index] -> True for every population the group touches.
    groups = collections.defaultdict(dict)
    for sample, row in table.iteritems():
        if sample not in data.samples:
            continue  # skip pseudo-samples such as "-"
        group = data.samples[sample][key]
        for index, value in enumerate(row):
            if value >= cutoff:
                groups[group][index] = True

    mixed_groups = []
    for group, memberships in sorted(groups.iteritems()):
        count = len(memberships)
        if count > 1:
            mixed_groups.append("member(s) of reference group %s assigned to "
                                "%i ancestral populations" % (group, count))

    if mixed_groups:
        raise AdmixtureError("Inconsistent ADMIXTURE results: %s; "
                             "cannot determine ancestry!"
                             % ("; ".join(mixed_groups)))
| [
"MikkelSch@gmail.com"
] | MikkelSch@gmail.com |
4a095744e7e81eb74d3e2a32b8d9364eb64bc23f | 41608dcb12ce6b16ad70d55bf0f155c1877de3a5 | /src/Utils/Valuator/BSM.py | ae17d6a56f9da1e637fd97e0c594cc5dadf048f8 | [] | no_license | frankma/Finance | eb68567e827e9045e1f4f3baaead6757aefb5168 | c6fe293895e1c295b7625f051625ba0b64efada1 | refs/heads/master | 2021-01-17T03:21:14.146946 | 2017-11-25T16:49:39 | 2017-11-25T16:49:39 | 33,056,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,669 | py | import logging
from math import log, exp, sqrt
import numpy as np
from scipy.stats import norm
from src.Utils.Solver.Brent import Brent
from src.Utils.Solver.IVariateFunction import IUnivariateFunction
from src.Utils.Solver.NewtonRaphson import NewtonRaphson
from src.Utils.Types import OptionType
__author__ = 'frank.ma'

# Module logger; used by the vectorized pricer for short/negative tau warnings.
logger = logging.getLogger(__name__)
class BSM(object):
    """Black-Scholes-Merton pricing and Greeks for a European option.

    Common parameters: s spot, k strike, tau time to expiry (years), r
    risk-free rate, q continuous dividend yield, sig volatility, and
    opt_type an OptionType whose ``value`` is +1 for calls and -1 for puts.
    """

    @staticmethod
    def calc_d1(s: float, k: float, tau: float, r: float, q: float, sig: float):
        """Return the d1 term of the Black-Scholes formula."""
        return (log(s / k) + (r - q + 0.5 * sig ** 2) * tau) / (sig * sqrt(tau))

    @staticmethod
    def calc_d2(s: float, k: float, tau: float, r: float, q: float, sig: float):
        """Return the d2 term (= d1 - sig * sqrt(tau))."""
        return (log(s / k) + (r - q - 0.5 * sig ** 2) * tau) / (sig * sqrt(tau))

    @staticmethod
    def price(s: float, k: float, tau: float, r: float, q: float, sig: float, opt_type: OptionType):
        """Return the discounted risk-neutral option value."""
        eta = opt_type.value
        d1 = BSM.calc_d1(s, k, tau, r, q, sig)
        d2 = BSM.calc_d2(s, k, tau, r, q, sig)
        return eta * (exp(-q * tau) * s * norm.cdf(eta * d1) - exp(-r * tau) * k * norm.cdf(eta * d2))

    @staticmethod
    def imp_vol(s: float, k: float, tau: float, r: float, q: float, price: float, opt_type: OptionType, method='Brent'):
        """Invert ``price`` for the implied volatility ('Brent' or 'Newton-Raphson').

        Raises ValueError for an unrecognized ``method``.
        """

        class PriceFunction(IUnivariateFunction):
            def evaluate(self, x):
                return BSM.price(s, k, tau, r, q, x, opt_type) - price

        class VegaFunction(IUnivariateFunction):
            def evaluate(self, x):
                return BSM.vega(s, k, tau, r, q, x)

        pf = PriceFunction()
        vf = VegaFunction()

        if method == 'Brent':
            bt = Brent(pf, 1e-4, 10.0)  # bracket the vol between 0.01% and 1000%
            vol = bt.solve()
        elif method == 'Newton-Raphson':
            nr = NewtonRaphson(pf, vf, 0.88)  # 0.88 is the hard-coded initial guess
            vol = nr.solve()
        else:
            raise ValueError('Unrecognized optimization method %s.' % method)

        return vol

    @staticmethod
    def delta(s: float, k: float, tau: float, r: float, q: float, sig: float, opt_type: OptionType):
        """Sensitivity of the price to the spot."""
        eta = opt_type.value
        d1 = BSM.calc_d1(s, k, tau, r, q, sig)
        return eta * exp(-q * tau) * norm.cdf(eta * d1)

    @staticmethod
    def delta_k(s: float, k: float, tau: float, r: float, q: float, sig: float, opt_type: OptionType):
        """Sensitivity of the price to the strike."""
        eta = opt_type.value
        d2 = BSM.calc_d2(s, k, tau, r, q, sig)
        return -eta * exp(-r * tau) * norm.cdf(eta * d2)

    @staticmethod
    def gamma(s: float, k: float, tau: float, r: float, q: float, sig: float):
        """Second derivative of the price with respect to the spot."""
        d1 = BSM.calc_d1(s, k, tau, r, q, sig)
        return exp(-q * tau) * norm.pdf(d1) / s / sig / sqrt(tau)

    @staticmethod
    def gamma_k(s: float, k: float, tau: float, r: float, q: float, sig: float):
        """Second derivative of the price with respect to the strike."""
        d2 = BSM.calc_d2(s, k, tau, r, q, sig)
        return exp(-r * tau) * norm.pdf(d2) / k / sig / sqrt(tau)

    @staticmethod
    def vega(s: float, k: float, tau: float, r: float, q: float, sig: float):
        """Sensitivity of the price to the volatility."""
        d1 = BSM.calc_d1(s, k, tau, r, q, sig)
        return s * exp(-q * tau) * norm.pdf(d1) * sqrt(tau)

    @staticmethod
    def theta(s: float, k: float, tau: float, r: float, q: float, sig: float, opt_type: OptionType):
        """Sensitivity of the price to the passage of time."""
        eta = opt_type.value
        d1 = BSM.calc_d1(s, k, tau, r, q, sig)
        d2 = BSM.calc_d2(s, k, tau, r, q, sig)
        term1 = -exp(-q * tau) * s * norm.pdf(d1) * sig / 2.0 / sqrt(tau)
        term2 = eta * q * s * exp(-q * tau) * norm.cdf(eta * d1)
        term3 = - eta * r * k * exp(-r * tau) * norm.cdf(eta * d2)
        return term1 + term2 + term3

    @staticmethod
    def rho(s: float, k: float, tau: float, r: float, q: float, sig: float, opt_type: OptionType):
        """Sensitivity of the price to the risk-free rate.

        BUGFIX: the strike factor ``k`` was missing from the formula
        (rho = eta * k * tau * exp(-r*tau) * N(eta*d2)); compare BSMVec.rho,
        which already includes it.
        """
        eta = opt_type.value
        d2 = BSM.calc_d2(s, k, tau, r, q, sig)
        return eta * k * tau * exp(-r * tau) * norm.cdf(eta * d2)
class BSMVec(BSM):
    """Vectorized (numpy) Black-Scholes-Merton; ``s``, ``k`` and ``sig`` are arrays."""

    @staticmethod
    def calc_d1(s: np.array, k: np.array, tau: float, r: float, q: float, sig: np.array):
        """Vectorized d1 term."""
        return (np.log(s / k) + (r - q + 0.5 * sig ** 2) * tau) / (sig * sqrt(tau))

    @staticmethod
    def calc_d2(s: np.array, k: np.array, tau: float, r: float, q: float, sig: np.array):
        """Vectorized d2 term (= d1 - sig * sqrt(tau))."""
        return (np.log(s / k) + (r - q - 0.5 * sig ** 2) * tau) / (sig * sqrt(tau))

    @staticmethod
    def price(s: np.array, k: np.array, tau: float, r: float, q: float, sig: np.array, opt_type: OptionType):
        """Vectorized price; falls back to the intrinsic payoff when tau < 1e-6."""
        if tau < 1e-6:
            if tau < 0.0:
                logger.warning('negative tau %r is provided in pricing function, return payoff.' % tau)
            return BSMVec.payoff(s, k, opt_type)
        else:
            eta = opt_type.value
            d_1 = BSMVec.calc_d1(s, k, tau, r, q, sig)
            d_2 = BSMVec.calc_d2(s, k, tau, r, q, sig)
            return eta * (exp(-q * tau) * s * norm.cdf(eta * d_1) - exp(-r * tau) * k * norm.cdf(eta * d_2))

    @staticmethod
    def payoff(s: np.array, k: np.array, opt_type: OptionType):
        """Intrinsic payoff max(eta * (s - k), 0) per element."""
        eta = opt_type.value
        return np.maximum(eta * (s - k), np.zeros(len(s)))

    @staticmethod
    def delta(s: np.array, k: np.array, tau: float, r: float, q: float, sig: np.array, opt_type: OptionType):
        """Vectorized delta; returns zeros when tau < 1e-6 (with a warning if negative)."""
        if tau < 1e-6:
            if tau < 0.0:
                logger.warning('negative tau %r is provided in delta function, return zeros.' % tau)
            return np.zeros(len(s))
        else:
            eta = opt_type.value
            d_1 = BSMVec.calc_d1(s, k, tau, r, q, sig)
            return eta * exp(-q * tau) * norm.cdf(eta * d_1)

    @staticmethod
    def gamma(s: np.array, k: np.array, tau: float, r: float, q: float, sig: np.array):
        """Vectorized gamma."""
        d_1 = BSMVec.calc_d1(s, k, tau, r, q, sig)
        return exp(-q * tau) * norm.pdf(d_1) / (s * sig * sqrt(tau))

    @staticmethod
    def vega(s: np.array, k: np.array, tau: float, r: float, q: float, sig: np.array):
        """Vectorized vega."""
        d_1 = BSMVec.calc_d1(s, k, tau, r, q, sig)
        return s * exp(-q * tau) * norm.pdf(d_1) * sqrt(tau)

    @staticmethod
    def theta(s: np.array, k: np.array, tau: float, r: float, q: float, sig: np.array, opt_type: OptionType):
        """Vectorized theta.

        BUGFIX (now matches the scalar BSM.theta): the original (1) omitted the
        ``sig`` factor in term1, (2) dropped the minus sign on term3, and
        (3) multiplied the three terms instead of summing them.
        """
        eta = opt_type.value
        d_1 = BSMVec.calc_d1(s, k, tau, r, q, sig)
        d_2 = BSMVec.calc_d2(s, k, tau, r, q, sig)
        term1 = -exp(-q * tau) * s * norm.pdf(d_1) * sig / (2.0 * sqrt(tau))
        term2 = eta * q * s * exp(-q * tau) * norm.cdf(eta * d_1)
        term3 = -eta * r * k * exp(-r * tau) * norm.cdf(eta * d_2)
        return term1 + term2 + term3

    @staticmethod
    def rho(s: np.array, k: np.array, tau: float, r: float, q: float, sig: np.array, opt_type: OptionType):
        """Vectorized rho (includes the strike factor ``k``)."""
        eta = opt_type.value
        d_2 = BSMVec.calc_d2(s, k, tau, r, q, sig)
        return eta * k * tau * exp(-r * tau) * norm.cdf(eta * d_2)
| [
"guang.y.ma@gmail.com"
] | guang.y.ma@gmail.com |
fd13356be0081170ae569dea23537c4688c513f0 | c77c10d8c6ef24f0dfa64a9824d584d723711f6c | /python/rest-client-samples/image/image_tagging_batch.py | eef3a11ce81454b6c0fc4dfdd5dcde8d206c2c14 | [
"Apache-2.0"
] | permissive | zhd/ais-sdk | daf2e3b3e61740e105cbdbd709a24a7ffd48ead8 | 9976a9595dd72d189b5f63e511e055251ab4e61f | refs/heads/master | 2020-04-02T01:59:44.053044 | 2018-10-20T08:47:24 | 2018-10-20T08:47:24 | 153,885,712 | 1 | 0 | Apache-2.0 | 2018-10-20T08:34:38 | 2018-10-20T08:34:37 | null | UTF-8 | Python | false | false | 2,112 | py | # -*- coding:utf-8 -*-
import sys
import urllib2
import json
import ssl
import base64
from urllib2 import HTTPError, URLError
from gettoken import get_token
# Python 2 idiom: reload(sys) re-exposes setdefaultencoding() (removed from the
# module namespace at interpreter startup) so the process-wide default string
# encoding can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
def download_url_base64(url):
    """Download ``url`` and return its body base64-encoded, or "" on non-200 status.

    Python 2 code (urllib2, print statements).
    """
    try:
        r = urllib2.urlopen(url)
    except HTTPError, e:
        resp = e.read()
        status_code = e.code
    except URLError, e:
        # NOTE(review): a plain URLError generally has neither .read() nor
        # .code (only HTTPError does), so this handler likely raises
        # AttributeError on pure connection failures -- verify.
        resp = e.read()
        status_code = e.code
    else:
        status_code = r.code
        resp = r.read()
    if status_code != 200:
        print "Error get url ", url, status_code
        return ""
    return base64.b64encode(resp)
def image_tagging(token, image_base64):
    """POST a base64 image to the HuaweiCloud image-tagging endpoint.

    token        -- X-Auth-Token string obtained from get_token().
    image_base64 -- base64-encoded image payload.

    Returns the raw response body (also for HTTP error responses), or None if
    urlopen raised without a readable body. Python 2 code (urllib2).
    """
    _url = 'https://ais.cn-north-1.myhuaweicloud.com/v1.0/image/tagging'
    # Request parameters: Chinese labels, at most 10 tags, confidence >= 10.0.
    _data = {
        "image": image_base64,
        "language": "zh",
        "limit": 10,
        "threshold": 10.0
    }
    kreq = urllib2.Request( url = _url)
    kreq.add_header('Content-Type', 'application/json')
    kreq.add_header('X-Auth-Token', token )
    kreq.add_data(json.dumps(_data))
    resp = None
    status_code = None
    try:
        r = urllib2.urlopen(kreq)
    except HTTPError, e:
        resp = e.read()
        status_code = e.code
    except URLError, e:
        # NOTE(review): URLError generally lacks .read()/.code; this branch
        # likely raises AttributeError on connection failures -- verify.
        resp = e.read()
        status_code = e.code
    else:
        status_code = r.code
        resp = r.read()
    return resp
def url_image_tagging(token, url):
    """Tag a single image URL and print "<url>\t<result>" to stdout.

    Prints "<url>\tERRORdownload" when the image could not be fetched.
    """
    image = download_url_base64(url)
    if len(image) == 0:
        print "%s\t%s" %(url, "ERRORdownload")
        return
    resp = image_tagging(token, image)
    print "%s\t%s" %(url, resp)
if __name__ == "__main__":
    # Credentials are placeholders; replace with real IAM account values.
    user_name = "XXX"
    password = "XXX"
    account_name = "XXX"
    # First CLI argument: file containing one image URL per line.
    url_file = sys.argv[1]
    # token expire in 24hour
    token = get_token(user_name, password, account_name)
    if len(token) == 0:
        print "Error username password"
        sys.exit(-1)
    # test urls in file
    for line in open(url_file):
        url = line.strip()
        url_image_tagging(token, url)
    ## test a sigle url
    #url_image_tagging(token, 'http://www.example.com/example.jpg')
| [
"17091412@qq.com"
] | 17091412@qq.com |
66a0abc924d8b12ab7931ec064ca79c6f391ae16 | 795df757ef84073c3adaf552d5f4b79fcb111bad | /r8lib/r8_mod.py | 658d59050a45420aa01faa0175bd7e9e59f4f220 | [] | no_license | tnakaicode/jburkardt-python | 02cb2f9ba817abf158fc93203eb17bf1cb3a5008 | 1a63f7664e47d6b81c07f2261b44f472adc4274d | refs/heads/master | 2022-05-21T04:41:37.611658 | 2022-04-09T03:31:00 | 2022-04-09T03:31:00 | 243,854,197 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,486 | py | #! /usr/bin/env python3
#
def r8_mod(x, y):
    """Return the remainder of x / y, carrying the sign of x.

    Satisfies  x == y * rmult + rem  with  rmult = (x - rem) / y  and
    abs(rem) < abs(y).  Examples:

        r8_mod( 107,  50) ==  7
        r8_mod( 107, -50) ==  7
        r8_mod(-107,  50) == -7
        r8_mod(-107, -50) == -7

    Raises SystemExit when y is zero, matching this library's
    Fortran-style fatal-error convention.
    """
    from sys import exit
    if y == 0.0:
        print('')
        print('R8_MOD - Fatal error!')
        print(' R8_MOD ( X, Y ) called with Y = 0.')
        exit('R8_MOD - Fatal error!')
    remainder = x - int(x / y) * y
    # Truncating division can leave the remainder with the wrong sign;
    # shift it by |y| so it agrees in sign with x.
    if x < 0.0 < remainder:
        remainder = remainder - abs(y)
    elif remainder < 0.0 < x:
        remainder = remainder + abs(y)
    return remainder
def r8_mod_test():
    """Demonstrate r8_mod by comparing it with Python's % operator on
    random operand pairs drawn from [-10, 10]."""
    import platform
    from r8_uniform_ab import r8_uniform_ab

    print('')
    print('R8_MOD_TEST')
    print(' Python version: %s' % (platform.python_version()))
    print(' R8_MOD returns the remainder after division.')
    print('')
    print(' X Y (X%Y) R8_MOD(X,Y)')
    print('')
    x_lo, x_hi = -10.0, +10.0
    seed = 123456789
    for _ in range(10):
        x, seed = r8_uniform_ab(x_lo, x_hi, seed)
        y, seed = r8_uniform_ab(x_lo, x_hi, seed)
        print(' %12f %12f %12f %12f' % (x, y, x % y, r8_mod(x, y)))
    # Terminate.
    print('')
    print('R8_MOD_TEST')
    print(' Normal end of execution.')
    return
# Run the demonstration when executed as a script, bracketing the output
# with timestamps (timestamp() is a sibling utility in this library).
if ( __name__ == '__main__' ):
    from timestamp import timestamp
    timestamp ( )
    r8_mod_test ( )
    timestamp ( )
| [
"tnakaicode@gmail.com"
] | tnakaicode@gmail.com |
c45a9cdc1199bbada87416704b20196c0de40f81 | bf534da18426b49dbee0a0b1870f5f3a85922855 | /ex049tabparaqlqrnumero.py | 7a82f9bdfefab90ed265b895293c51a526aed53e | [] | no_license | kcpedrosa/Python-exercises | 0d20a72e7e68d9fc9714e3aabf4850fdbeb7d1f8 | ae35dfad869ceb3aac186fce5161cef8a77a7579 | refs/heads/master | 2021-05-20T08:46:29.318242 | 2020-04-01T15:44:36 | 2020-04-01T15:44:36 | 252,205,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | tabuada = int(input('Digite o numero da tabuada: '))
n = int(input('Digite o inicio da tabuada: '))
print("Tabuada de %d" % tabuada)
if n <= 10:
    # Print the multiplication table from the requested start line up to 10.
    for linha in range(n, 11):
        print("%d x %d = %d" % (tabuada, linha, tabuada * linha))
else:
    print('Você está de brincadeira. Digite um numero menor ou igual a 10')
"kattine.costa@gmail.com"
] | kattine.costa@gmail.com |
32f32044f17e55e730595c1e880cbc6ad0dc5ede | c9f9a4efae174ef89071f4e3cdbb4f4bba3b2eac | /test7/test7/settings.py | 9859d2eaaa1e565ac1b6b019161a100bc6717fdf | [] | no_license | rj8928/projects | 8618bf40392a140bdf6b6264fd86cae3c6321ede | 5fe78f839b918a5cf1d0db52c9b6237ffdae08c9 | refs/heads/master | 2021-01-01T18:40:12.316231 | 2017-08-06T02:24:08 | 2017-08-06T02:24:08 | 98,400,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,864 | py | """
Django settings for test7 project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Absolute path of the project root (two directories above this file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment (or a secrets store) before deploying.
SECRET_KEY = 't(%^u!#=-oy^s5=@%cq+enhs*3v@caddry0=m3-fh*hc8w9#^v'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

# NOTE(review): '*' accepts any Host header; list the real hostnames in
# production to prevent Host-header attacks.
ALLOWED_HOSTS = ['*',]

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'booktest',  # project app
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'test7.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, searched before per-app dirs.
        'DIRS': [os.path.join(BASE_DIR,'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'test7.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# NOTE(review): credentials are hard-coded; prefer loading them from the
# environment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME':'test3',
        'USER':'root',
        'PASSWORD':'jxust8928',
        'HOST':'localhost',
        'PORT':'3306',
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR,'static'),
]
# Collection target for `collectstatic` on deployment.
STATIC_ROOT = '/var/www/test7/static/'
"rj8928@gmail.com"
] | rj8928@gmail.com |
4087256ac94244ef67a6445e2c928565d99a813d | 368be25e37bafa8cc795f7c9f34e4585e017091f | /.history/app_fav_books/views_20201113172600.py | 40f5096e93ababd00b378c44b61e32e0bdd41f54 | [] | no_license | steven-halla/fav_books_proj | ebcfbfda0e7f3cdc49d592c86c633b1d331da513 | 512005deb84ac906c9f24d4ab0939bd0db096716 | refs/heads/master | 2023-03-30T09:37:38.016063 | 2021-04-02T20:27:22 | 2021-04-02T20:27:22 | 354,125,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
def index(request):
    """Render the landing page (registration/login form)."""
    return render(request, "index.html")
def register_New_User(request):
    """Validate the POSTed registration data and flash any errors.

    On validation failure every error message is flashed and the user is
    redirected back to the form.  The success branch is unfinished in this
    snapshot: it only captures the raw POST data.
    """
    errors = User.objects.basic_validator(request.POST)
    if len(errors) > 0:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect("/")
    else:
        # TODO: registration is incomplete -- this only grabs request.POST.
        first_name_from_post = request.POST
"69405488+steven-halla@users.noreply.github.com"
] | 69405488+steven-halla@users.noreply.github.com |
5031fbd2b72ba7e0b98fd3e398eeba981cd8543b | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/yamlext/bluepy.py | 6741b4f1dd254b01051e099a6668754eb6d323ab | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 4,483 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\yamlext\bluepy.py
from collections import OrderedDict
import yaml
from . import PyIO
# True when PyYAML was built with the LibYAML C extension (CLoader/CEmitter).
cyaml_supported = hasattr(yaml, 'CLoader')
def isNumber(string):
    """Return True when *string* parses as an int or float literal.

    float() accepts every string int() does (plus decimals, exponents,
    inf/nan), so a single float parse replaces the original's two nested
    try blocks.  Non-string inputs now return False instead of raising
    TypeError -- a backward-compatible robustness widening.
    """
    try:
        float(string)
    except (TypeError, ValueError):
        return False
    return True
class BlueRepresenter(yaml.representer.Representer):
    """Representer that emits scalar-only sequences in flow style and
    double-quotes non-numeric string values (except values of 'type' keys)."""

    def __init__(self, default_style = None, default_flow_style = None):
        yaml.representer.Representer.__init__(self, default_style, default_flow_style)

    def represent_sequence(self, tag, sequence, flow_style = None):
        node = yaml.representer.Representer.represent_sequence(self, tag, sequence, flow_style)
        # Only flatten leaf sequences (no nested containers) onto one line.
        # BUG FIX: the original indexed sequence[0] unconditionally and
        # raised IndexError on an empty sequence; empty sequences now keep
        # the default style.
        if sequence and not isinstance(sequence[0], (dict, OrderedDict, list)):
            node.flow_style = True
            for listItem in node.value:
                # Quote string items so they round-trip unambiguously.
                if isinstance(listItem.value, (str, unicode)) and not isNumber(listItem.value):
                    listItem.style = '"'
        return node

    def represent_mapping(self, tag, mapping, flow_style = None):
        node = yaml.representer.Representer.represent_mapping(self, tag, mapping, flow_style)
        for nodeKey, nodeValue in node.value:
            keyValue = nodeKey.value
            valueValue = nodeValue.value
            # Quote non-numeric string values; 'type' values stay bare for
            # the downstream tooling.
            if keyValue != 'type' and isinstance(valueValue, (str, unicode)) and not isNumber(valueValue):
                nodeValue.style = '"'
        return node
if cyaml_supported:
    from _yaml import CEmitter

    class BlueDumper(CEmitter, yaml.serializer.Serializer, BlueRepresenter, yaml.resolver.Resolver):
        """Dumper built on LibYAML's C emitter (fast path).

        NOTE(review): this branch forces default_flow_style=False while the
        pure-Python branch below forces True -- output style differs between
        builds; confirm which is intended.
        """
        def __init__(self, stream, default_style = None, default_flow_style = None, canonical = None, indent = None, width = None, allow_unicode = None, line_break = None, encoding = None, explicit_start = None, explicit_end = None, version = None, tags = None):
            # CEmitter handles emitting and serialising internally, so only
            # the representer/resolver mixins are initialised explicitly.
            CEmitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, encoding=encoding, allow_unicode=allow_unicode, line_break=line_break, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags)
            BlueRepresenter.__init__(self, default_style=default_style, default_flow_style=False)
            yaml.resolver.Resolver.__init__(self)

    class BlueLoader(yaml.CLoader):
        """Loader backed by the LibYAML C parser."""
        pass
else:
    class BlueDumper(yaml.emitter.Emitter, yaml.serializer.Serializer, BlueRepresenter, yaml.resolver.Resolver):
        """Pure-Python dumper used when LibYAML is unavailable."""
        def __init__(self, stream, default_style = None, default_flow_style = None, canonical = None, indent = None, width = None, allow_unicode = None, line_break = None, encoding = None, explicit_start = None, explicit_end = None, version = None, tags = None):
            yaml.emitter.Emitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, allow_unicode=allow_unicode, line_break=line_break)
            yaml.serializer.Serializer.__init__(self, encoding=encoding, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags)
            BlueRepresenter.__init__(self, default_style=default_style, default_flow_style=True)
            yaml.resolver.Resolver.__init__(self)

    class BlueLoader(yaml.Loader):
        """Pure-Python loader."""
        pass
def _construct_mapping(loader, node):
    """Construct YAML mappings as OrderedDict, preserving key order."""
    loader.flatten_mapping(node)
    return OrderedDict(loader.construct_pairs(node))
def _dict_representer(dumper, d):
    """Represent an OrderedDict as a plain YAML map (no !!python/... tag)."""
    return dumper.represent_mapping(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, d.items())
# Register OrderedDict support: mappings load as OrderedDict and dump as
# plain YAML maps, preserving key order in both directions.
BlueLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _construct_mapping)
BlueDumper.add_representer(OrderedDict, _dict_representer)
class _BlueIO(PyIO):
    """PyIO subclass bound to the Blue loader/dumper pair defined above."""
    def __init__(self):
        PyIO.__init__(self)
        # NOTE(review): the None pre-assignment is immediately overwritten;
        # presumably kept to declare the attributes -- confirm before removing.
        self._loader = self._dumper = None
        self._loader = BlueLoader
        self._dumper = BlueDumper
# Module-level convenience API; each call delegates to a fresh _BlueIO.
def loads(s):
    """Parse a YAML document from the string *s*."""
    return _BlueIO().loads(s)

def loadfile(path):
    """Parse a YAML document from the file at *path*."""
    return _BlueIO().loadfile(path)

def load(stream):
    """Parse a YAML document from the open *stream*."""
    return _BlueIO().load(stream)

def dumps(obj, **kwargs):
    """Serialise *obj* to a YAML string."""
    return _BlueIO().dumps(obj, **kwargs)

def dump(obj, stream, **kwargs):
    """Serialise *obj* to the open *stream*."""
    return _BlueIO().dump(obj, stream, **kwargs)

def dumpfile(obj, path, **kwargs):
    """Serialise *obj* to the file at *path*."""
    return _BlueIO().dumpfile(obj, path, **kwargs)
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
dd1efcc51937fbd3a8a07281534017d5801039fb | 38efe804f2a070737984b5f4306ef9f09cbdf9c2 | /clusterDist.py | 1b14885ba62ef3ddc5e7485a39c7ad778ea9cb43 | [] | no_license | krm9c/HierarchicalDimensionReduction | 0bd59248035159c3d9fb8c9c407a3cecba7635d8 | 6af1455be32abf2e06667e7c264f52c76329ffee | refs/heads/master | 2020-05-27T21:10:03.027472 | 2018-02-04T22:49:25 | 2018-02-04T22:49:25 | 83,605,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,605 | py | #!/usr/bin/env python
# kmeans.py using any of the 20-odd metrics in scipy.spatial.distance
# kmeanssample 2 pass, first sample sqrt(N)
from __future__ import division
import random
import numpy as np
from scipy.spatial.distance import cdist # $scipy/spatial/distance.py
# http://docs.scipy.org/doc/scipy/reference/spatial.html
from scipy.sparse import issparse # $scipy/sparse/csr.py
__date__ = "2011-11-17 Nov denis"
# X sparse, any cdist metric: real app ?
# centres get dense rapidly, metrics in high dim hit distance whiteout
# vs unsupervised / semi-supervised svm
#...............................................................................
def kmeans( X, centres, delta=.001, maxiter=10, metric="euclidean", p=2, verbose=1 ):
    """Lloyd's algorithm with an arbitrary scipy.spatial.distance metric.

    In:
        X: N x dim data matrix; may be scipy sparse.
        centres: k x dim initial centres, e.g. random.sample(X, k).
        delta: convergence tolerance -- stop once the mean point-to-centre
            distance improves by less than this relative amount.
        maxiter: hard cap on the number of iterations.
        metric: any cdist metric name ("chebyshev" = max, "cityblock" = L1,
            "minkowski" with p=...) or a callable f(Xvec, centrevec),
            e.g. Lqmetric below.
        p: order for the minkowski metric.
        verbose: 0 silent, 1 progress lines, 2 adds running distances/radii.

    Out:
        centres (k x dim), Xtocentre (N ints: nearest centre per point),
        distances (N).

    See also: kmeanssample below, class Kmeans below.
    """
    if not issparse(X):
        X = np.asanyarray(X)  # ?
    # Work on a private copy of the centres; densify sparse seeds.
    centres = centres.todense() if issparse(centres) \
        else centres.copy()
    N, dim = X.shape
    k, cdim = centres.shape
    if dim != cdim:
        raise ValueError( "kmeans: X %s and centres %s must have the same number of columns" % (
            X.shape, centres.shape ))
    if verbose:
        print "kmeans: X %s centres %s delta=%.2g maxiter=%d metric=%s" % (
            X.shape, centres.shape, delta, maxiter, metric)
    allx = np.arange(N)
    prevdist = 0
    for jiter in range( 1, maxiter+1 ):
        D = cdist_sparse( X, centres, metric=metric, p=p )  # |X| x |centres|
        xtoc = D.argmin(axis=1)  # X -> nearest centre
        distances = D[allx,xtoc]
        avdist = distances.mean()  # median ?
        if verbose >= 2:
            print "kmeans: av |X - nearest centre| = %.4g" % avdist
        # Converged once the mean distance stops shrinking (relative delta)
        # or the iteration budget is spent.
        if (1 - delta) * prevdist <= avdist <= prevdist \
        or jiter == maxiter:
            break
        prevdist = avdist
        for jc in range(k):  # (1 pass in C)
            # Recompute each centre as the mean of its assigned points;
            # clusters that received no points keep their previous centre.
            c = np.where( xtoc == jc )[0]
            if len(c) > 0:
                centres[jc] = X[c].mean( axis=0 )
    if verbose:
        print "kmeans: %d iterations cluster sizes:" % jiter, np.bincount(xtoc)
    if verbose >= 2:
        # Report the 50th/90th percentile radius of each cluster.
        r50 = np.zeros(k)
        r90 = np.zeros(k)
        for j in range(k):
            dist = distances[ xtoc == j ]
            if len(dist) > 0:
                r50[j], r90[j] = np.percentile( dist, (50, 90) )
        print "kmeans: cluster 50 % radius", r50.astype(int)
        print "kmeans: cluster 90 % radius", r90.astype(int)
        # scale L1 / dim, L2 / sqrt(dim) ?
    return centres, xtoc, distances
#...............................................................................
def kmeanssample(X, k, nsample=0, **kwargs):
    """Two-pass kmeans for large N: first cluster a random sample of about
    sqrt(N) points, then run full kmeans seeded with the sample's centres.

    Extra keyword arguments are forwarded to kmeans().
    """
    n_rows, n_cols = X.shape
    if nsample == 0:
        # Default sample size: ~2*sqrt(N), but at least 10 points per cluster.
        nsample = max(2 * np.sqrt(n_rows), 10 * k)
    sample = randomsample(X, int(nsample))
    seed_centres = randomsample(X, int(k))
    sample_centres = kmeans(sample, seed_centres, **kwargs)[0]
    return kmeans(X, sample_centres, **kwargs)
def cdist_sparse(X, Y, **kwargs):
    """Pairwise-distance matrix |X| x |Y| for any cdist metric, where X
    and/or Y may be scipy sparse matrices (densified one row at a time,
    which is very slow when both are sparse)."""
    x_sparse = issparse(X)
    y_sparse = issparse(Y)
    if not x_sparse and not y_sparse:
        return cdist(X, Y, **kwargs)
    out = np.empty((X.shape[0], Y.shape[0]), np.float64)
    if x_sparse and not y_sparse:
        for i, row in enumerate(X):
            out[i] = cdist(row.todense(), Y, **kwargs)[0]
    elif y_sparse and not x_sparse:
        for j, col in enumerate(Y):
            out[:, j] = cdist(X, col.todense(), **kwargs)[0]
    else:
        for i, row in enumerate(X):
            for j, col in enumerate(Y):
                out[i, j] = cdist(row.todense(), col.todense(), **kwargs)[0]
    return out
def randomsample(X, n):
    """Return n rows of X chosen uniformly at random, without replacement.

    X may be sparse -- csr works best, since rows are picked by fancy
    indexing.
    """
    # range() works under both Python 2 and 3 here; the original xrange
    # does not exist in Python 3.  random.sample draws indices lazily.
    sampleix = random.sample(range(X.shape[0]), int(n))
    return X[sampleix]
def nearestcentres(X, centres, metric="euclidean", p=2):
    """Index of the nearest centre for each row of X, under any cdist metric.

    euclidean2 (~ withinss) is more sensitive to outliers; cityblock
    (manhattan, L1) less so.
    """
    distances = cdist(X, centres, metric=metric, p=p)  # |X| x |centres|
    return distances.argmin(axis=1)
def Lqmetric(x, y=None, q=.5):
    """Mean of |x - y| ** q (or |x| ** q when y is None).

    For q < 1 this up-weights near matches relative to a true norm.
    """
    diff = x if y is None else x - y
    return (np.abs(diff) ** q).mean()
#...............................................................................
class Kmeans:
    """Convenience wrapper: km = Kmeans(X, k=...) or Kmeans(X, centres=...).

    Runs kmeanssample() when no initial centres are supplied, plain
    kmeans() otherwise, and stores centres, Xtocentre and distances.
    Iterating yields (centre index, boolean mask of the points assigned
    to that centre), so e.g.  for jc, J in km: X[J] ...
    """
    def __init__(self, X, k=0, centres=None, nsample=0, **kwargs):
        self.X = X
        if centres is None:
            result = kmeanssample(X, k=k, nsample=nsample, **kwargs)
        else:
            result = kmeans(X, centres, **kwargs)
        self.centres, self.Xtocentre, self.distances = result

    def __iter__(self):
        for centre_ix in range(len(self.centres)):
            yield centre_ix, (self.Xtocentre == centre_ix)
#...............................................................................
if __name__ == "__main__":
    import random
    import sys
    from time import time

    # Benchmark parameters; any can be overridden from the command line,
    # e.g.  python this.py N=100000 metric="chebyshev"
    N = 10000
    dim = 10
    ncluster = 10
    kmsample = 100  # 0: random centres, > 0: kmeanssample
    kmdelta = .001
    kmiter = 10
    metric = "cityblock"  # "chebyshev" = max, "cityblock" L1, Lqmetric
    seed = 1
    # WARNING: exec() of raw command-line arguments runs arbitrary code --
    # only invoke this script with trusted arguments.
    exec( "\n".join( sys.argv[1:] ))  # run this.py N= ...
    np.set_printoptions( 1, threshold=200, edgeitems=5, suppress=True )
    np.random.seed(seed)
    random.seed(seed)
    print "N %d dim %d ncluster %d kmsample %d metric %s" % (
        N, dim, ncluster, kmsample, metric)
    X = np.random.exponential( size=(N,dim) )
    # cf scikits-learn datasets/
    t0 = time()
    if kmsample > 0:
        centres, xtoc, dist = kmeanssample( X, ncluster, nsample=kmsample,
            delta=kmdelta, maxiter=kmiter, metric=metric, verbose=2 )
    else:
        randomcentres = randomsample( X, ncluster )
        centres, xtoc, dist = kmeans( X, randomcentres,
            delta=kmdelta, maxiter=kmiter, metric=metric, verbose=2 )
    print "%.0f msec" % ((time() - t0) * 1000)
    # also ~/py/np/kmeans/test-kmeans.py
"krm9c@mst.edu"
] | krm9c@mst.edu |
5a821ae3b4527b4c618c4a37daee6aa675cb2fde | 2ee8b831f228791ce5f5bb02298ce399b301e5f5 | /virtual/bin/markdown2 | 26877791374b6c569706f45e02b86bb59f0afe02 | [
"MIT"
] | permissive | amoskipz/Blog-App | 7865c7cc5e9fcafdbe9593b4a912a8f7663315c2 | 16c2ac60cd61a1240ec32a00cafb16491be4be3a | refs/heads/master | 2023-03-15T03:59:26.140437 | 2021-03-14T09:38:25 | 2021-03-14T09:38:25 | 346,115,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | #!/home/moringa/moringa/Blog-app/virtual/bin/python3
import sys
from os.path import join, dirname, exists
# Use the local markdown2.py if we are in the source tree.
# (A source checkout's ../lib/markdown2.py takes precedence over any
# installed copy; the temporary sys.path entry is removed afterwards.)
source_tree_markdown2 = join(dirname(__file__), "..", "lib", "markdown2.py")
if exists(source_tree_markdown2):
    sys.path.insert(0, dirname(source_tree_markdown2))
    try:
        from markdown2 import main
    finally:
        # Drop the temporary path entry so later imports are unaffected.
        del sys.path[0]
else:
    from markdown2 import main

if __name__ == "__main__":
    sys.exit( main(sys.argv) )
| [
"oduorthomas14@gmail.com"
] | oduorthomas14@gmail.com | |
4e6f9e02a2bc25af9974e264a2cb211510c04cb5 | 9d278285f2bc899ac93ec887b1c31880ed39bf56 | /ondoc/doctor/migrations/0219_hospitaltiming.py | 141550a13b01289578de2f6301c2d453fb57bf04 | [] | no_license | ronit29/docprime | 945c21f8787387b99e4916cb3ba1618bc2a85034 | 60d4caf6c52a8b70174a1f654bc792d825ba1054 | refs/heads/master | 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,663 | py | # Generated by Django 2.0.5 on 2019-03-11 10:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django 2.0.5 on 2019-03-11 10:54.

    Creates the hospital_timing table: one row per hospital/weekday with
    start/end times stored as decimal hours (e.g. 13.5 == 1:30 PM).
    Auto-generated migrations should not be hand-edited once applied.
    """

    dependencies = [
        ('doctor', '0218_hospitalimage_cover_image'),
    ]

    operations = [
        migrations.CreateModel(
            name='HospitalTiming',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('day', models.PositiveSmallIntegerField(choices=[(0, 'Monday'), (1, 'Tuesday'), (2, 'Wednesday'), (3, 'Thursday'), (4, 'Friday'), (5, 'Saturday'), (6, 'Sunday')])),
                ('start', models.DecimalField(choices=[(5.0, '5 AM'), (5.5, '5:30 AM'), (6.0, '6 AM'), (6.5, '6:30 AM'), (7.0, '7:00 AM'), (7.5, '7:30 AM'), (8.0, '8:00 AM'), (8.5, '8:30 AM'), (9.0, '9:00 AM'), (9.5, '9:30 AM'), (10.0, '10:00 AM'), (10.5, '10:30 AM'), (11.0, '11:00 AM'), (11.5, '11:30 AM'), (12.0, '12:00 PM'), (12.5, '12:30 PM'), (13.0, '1:00 PM'), (13.5, '1:30 PM'), (14.0, '2:00 PM'), (14.5, '2:30 PM'), (15.0, '3:00 PM'), (15.5, '3:30 PM'), (16.0, '4:00 PM'), (16.5, '4:30 PM'), (17.0, '5:00 PM'), (17.5, '5:30 PM'), (18.0, '6:00 PM'), (18.5, '6:30 PM'), (19.0, '7:00 PM'), (19.5, '7:30 PM'), (20.0, '8:00 PM'), (20.5, '8:30 PM'), (21.0, '9:00 PM'), (21.5, '9:30 PM'), (22.0, '10:00 PM'), (22.5, '10:30 PM'), (23.0, '11 PM'), (23.5, '11:30 PM')], decimal_places=1, max_digits=3)),
                ('end', models.DecimalField(choices=[(5.0, '5 AM'), (5.5, '5:30 AM'), (6.0, '6 AM'), (6.5, '6:30 AM'), (7.0, '7:00 AM'), (7.5, '7:30 AM'), (8.0, '8:00 AM'), (8.5, '8:30 AM'), (9.0, '9:00 AM'), (9.5, '9:30 AM'), (10.0, '10:00 AM'), (10.5, '10:30 AM'), (11.0, '11:00 AM'), (11.5, '11:30 AM'), (12.0, '12:00 PM'), (12.5, '12:30 PM'), (13.0, '1:00 PM'), (13.5, '1:30 PM'), (14.0, '2:00 PM'), (14.5, '2:30 PM'), (15.0, '3:00 PM'), (15.5, '3:30 PM'), (16.0, '4:00 PM'), (16.5, '4:30 PM'), (17.0, '5:00 PM'), (17.5, '5:30 PM'), (18.0, '6:00 PM'), (18.5, '6:30 PM'), (19.0, '7:00 PM'), (19.5, '7:30 PM'), (20.0, '8:00 PM'), (20.5, '8:30 PM'), (21.0, '9:00 PM'), (21.5, '9:30 PM'), (22.0, '10:00 PM'), (22.5, '10:30 PM'), (23.0, '11 PM'), (23.5, '11:30 PM')], decimal_places=1, max_digits=3)),
                ('hospital', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hosp_availability', to='doctor.Hospital')),
            ],
            options={
                'db_table': 'hospital_timing',
            },
        ),
    ]
| [
"shashanks@policybazaar.com"
] | shashanks@policybazaar.com |
b4c92b2d75d4f543d8df6161ea6c2627d29c7cb9 | 56fd2d92b8327cfb7d8f95b89c52e1700343b726 | /odin/utilities/odin_init.py | f1c8be660e7bc9f44f1ebd0eed2725426c4967c8 | [
"MIT"
] | permissive | stjordanis/Odin | fecb640ccf4f2e6eb139389d25cbe37da334cdb6 | e2e9d638c68947d24f1260d35a3527dd84c2523f | refs/heads/master | 2020-04-15T09:13:17.850126 | 2017-02-09T00:25:55 | 2017-02-09T00:25:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | import os
from .params import IOFiles
def odin_init(sname):
    """Create the directory skeleton Odin needs to run a trading strategy.

    Makes a directory named *sname* under the current working directory
    and creates, if absent, the empty dependency files Odin expects
    (main, handlers, settings, strategy and fund), with names taken from
    params.IOFiles.

    Usage
    -----
    This code can be used from the command line as follows:

        python3 -c "from odin.utilities import odin_init ; odin_init('strat')"

    Parameters
    ----------
    sname: String.
        A string giving an identifier to the directory that will house the
        implementation of the strategy and dependency files.
    """
    path = "./" + sname + "/"
    filenames = (
        IOFiles.main_file.value,
        IOFiles.handlers_file.value,
        IOFiles.settings_file.value,
        IOFiles.strategy_file.value,
        IOFiles.fund_file.value,
    )
    if not os.path.isdir(path):
        os.mkdir(path)
    # Touch each dependency file only when missing, so re-running the
    # initialiser never clobbers existing work.  (Replaces five identical
    # copy-pasted create-if-missing stanzas in the original.)
    for name in filenames:
        target = path + name
        if not os.path.isfile(target):
            open(target, "a").close()
| [
"jamesbrofos@gmail.com"
] | jamesbrofos@gmail.com |
1541f0cc0729432960694e2a393e12af4eeb2c20 | f83ef53177180ebfeb5a3e230aa29794f52ce1fc | /opencv/opencv-2.4.13.6/modules/python/test/test_houghcircles.py | 32b474fc855db5f85cd7a31f56d2dcec13488b95 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | msrLi/portingSources | fe7528b3fd08eed4a1b41383c88ee5c09c2294ef | 57d561730ab27804a3172b33807f2bffbc9e52ae | refs/heads/master | 2021-07-08T01:22:29.604203 | 2019-07-10T13:07:06 | 2019-07-10T13:07:06 | 196,183,165 | 2 | 1 | Apache-2.0 | 2020-10-13T14:30:53 | 2019-07-10T10:16:46 | null | UTF-8 | Python | false | false | 2,165 | py | #!/usr/bin/python
'''
This example illustrates how to use cv2.HoughCircles() function.
'''
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import numpy as np
import sys
from numpy import pi, sin, cos
from tests_common import NewOpenCVTests
def circleApproximation(circle):
    """Approximate a circle (cx, cy, r) by a 30-vertex integer polygon.

    Returns an (nPoints, 2) int array of [x, y] vertices, truncated toward
    zero exactly as the original per-point int cast did.  The unused 'phi'
    accumulator of the original loop is removed and the sampling is
    vectorised over all angles.
    """
    nPoints = 30
    angles = np.arange(nPoints) * (2 * pi / nPoints)
    xs = circle[0] + circle[2] * np.cos(angles)
    ys = circle[1] + circle[2] * np.sin(angles)
    return np.stack([xs, ys], axis=1).astype(int)
def convContoursIntersectiponRate(c1, c2):
    """Dice-style overlap of two convex contours: 2*|c1 n c2| / (|c1| + |c2|).

    Returns 1.0 for identical contours, 0.0 for disjoint ones.
    (The 'Intersectipon' typo is kept for call-site compatibility.)
    """
    s1 = cv2.contourArea(c1)
    s2 = cv2.contourArea(c2)
    s, _ = cv2.intersectConvexConvex(c1, c2)
    return 2*s/(s1+s2)
class houghcircles_test(NewOpenCVTests):
    """Regression test: HoughCircles should recover most of the known
    circles on the sample board image."""

    def test_houghcircles(self):
        fn = "samples/cpp/board.jpg"
        src = self.get_sample(fn, 1)
        img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
        # Median blur suppresses speckle noise before the Hough transform.
        img = cv2.medianBlur(img, 5)
        circles = cv2.HoughCircles(img, cv2.cv.CV_HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)[0]
        # Ground-truth circles (x, y, radius) measured on board.jpg.
        testCircles = [[38, 181, 17.6],
            [99.7, 166, 13.12],
            [142.7, 160, 13.52],
            [223.6, 110, 8.62],
            [79.1, 206.7, 8.62],
            [47.5, 351.6, 11.64],
            [189.5, 354.4, 11.64],
            [189.8, 298.9, 10.64],
            [189.5, 252.4, 14.62],
            [252.5, 393.4, 15.62],
            [602.9, 467.5, 11.42],
            [222, 210.4, 9.12],
            [263.1, 216.7, 9.12],
            [359.8, 222.6, 9.12],
            [518.9, 120.9, 9.12],
            [413.8, 113.4, 9.12],
            [489, 127.2, 9.12],
            [448.4, 121.3, 9.12],
            [384.6, 128.9, 8.62]]
        # A detection "matches" a reference circle when their polygonal
        # approximations overlap by more than 60% (Dice coefficient).
        matches_counter = 0
        for i in range(len(testCircles)):
            for j in range(len(circles)):
                tstCircle = circleApproximation(testCircles[i])
                circle = circleApproximation(circles[j])
                if convContoursIntersectiponRate(tstCircle, circle) > 0.6:
                    matches_counter += 1
        # At least half the reference circles must be found, and at most
        # 75% of the detections may be spurious.
        self.assertGreater(float(matches_counter) / len(testCircles), .5)
        self.assertLess(float(len(circles) - matches_counter) / len(circles), .75)
"lihuibin705@163.com"
] | lihuibin705@163.com |
dbb633ad70ce2edee54c8cc9d8267977bf5cd330 | b23c6c02d9b54c987bca2e36c3506cf80fa28239 | /Python GUI samples progs/bind()_3.py | a45558950b7943b21048f81da502bed094207d39 | [] | no_license | nishikaverma/Python_progs | 21190c88460a79f5ce20bb25d1b35f732fadd642 | 78f0cadde80b85356b4cb7ba518313094715aaa5 | refs/heads/master | 2022-06-12T14:54:03.442837 | 2020-05-08T10:28:58 | 2020-05-08T10:28:58 | 262,293,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | from tkinter import *
def fun(e):
    """Key handler: recolour the window background for the r/b/g keys.

    Any other key is ignored, exactly as in the original if-chain.
    """
    # Table-driven dispatch replaces three separate if statements.
    colours = {"r": "red", "b": "blue", "g": "green"}
    key = e.char
    if key in colours:
        obj['bg'] = colours[key]
# Build a 400x400 window and recolour it on r/b/g key presses.
obj=Tk()
obj.geometry("400x400")
obj.bind("<Key>",fun)
# Alternative: one binding per key via lambdas (kept for reference).
#obj.bind("r",lambda e:obj.config(bg="red"))
#obj.bind("b",lambda e:obj.config(bg="blue"))
#obj.bind("g",lambda e:obj.config(bg="green"))
obj.mainloop()
"nishika.verma@live.com"
] | nishika.verma@live.com |
fac0664e6843c613d99bfe54a34977ea1d1fb7f6 | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/20cec02a-5cc5-11e4-af55-00155d01fe08.py | 43c152203e7fd04e7a2001b174788fa197d466ee | [
"MIT"
] | permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | #!/usr/bin/python
################################################################################
# 20cec02a-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """STIG-style audit: flags any account granted SeServiceLogonRight."""

    def __init__(self):
        self.output = []            # report lines from the last check()
        self.is_compliant = False   # result of the last check()
        self.uuid = "20cec02a-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        """Run the audit through *cli* and return (and store) compliance."""
        accounts = cli.get_secedit_account('SeServiceLogonRight')
        self.output = ["SeServiceLogonRight="] + accounts
        # No account names are recommended for this right, so compliance
        # means the granted-accounts list is effectively empty; the
        # substring test against the empty recommendation mirrors the
        # original logic exactly.
        recommended = ""
        self.is_compliant = all(
            user.lower() in recommended.lower() for user in accounts
        )
        return self.is_compliant
| [
"phnomcobra@gmail.com"
] | phnomcobra@gmail.com |
ab812bb7efcb5ea4da28c1482e801a1d9950ffff | d32bc79eb8631d6bc4ab20498631ba516db4d5f7 | /654_constructMaximumBinaryTree.py | e748b488968487a50b8649fc0aaa22486be027d1 | [] | no_license | Anirban2404/LeetCodePractice | 059f382d17f71726ad2d734b9579f5bab2bba93c | 786075e0f9f61cf062703bc0b41cc3191d77f033 | refs/heads/master | 2021-10-08T04:41:36.163328 | 2021-09-28T02:16:47 | 2021-09-28T02:16:47 | 164,513,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 20 13:14:08 2019
@author: anirban-mac
"""
"""
654. Maximum Binary Tree
Given an integer array with no duplicates. A maximum tree building on this
array is defined as follow:
The root is the maximum number in the array.
The left subtree is the maximum tree constructed from left part subarray
divided by the maximum number.
The right subtree is the maximum tree constructed from right part subarray
divided by the maximum number.
Construct the maximum tree by the given array and output the root node of
this tree.
Example 1:
Input: [3,2,1,6,0,5]
Output: return the tree root node representing the following tree:
6
/ \
3 5
\ /
2 0
\
1
Note:
The size of the given array will be in the range [1,1000].
"""
# Definition for a binary tree node.
class TreeNode(object):
    """Plain binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None   # left child TreeNode or None
        self.right = None  # right child TreeNode or None
class Solution:
    """LeetCode 654: construct the 'maximum binary tree' of an array."""

    def constructMaximumBinaryTree(self, nums):
        """
        :type nums: List[int]
        :rtype: TreeNode
        """
        if not nums:
            return None
        if len(nums) == 1:
            return TreeNode(nums[0])
        # The largest value becomes the root; the slices to its left and
        # right recursively form the subtrees.
        pivot = nums.index(max(nums))
        root = TreeNode(int(nums[pivot]))
        root.left = self.constructMaximumBinaryTree(nums[:pivot])
        root.right = self.constructMaximumBinaryTree(nums[pivot + 1:])
        return root

    def prettyPrintTree(self, node, prefix="", isLeft=True):
        """Print the tree sideways using box-drawing connectors."""
        if not node:
            print("Empty Tree")
            return
        if node.right:
            self.prettyPrintTree(node.right, prefix + ("│ " if isLeft else " "), False)
        print(prefix + ("└── " if isLeft else "┌── ") + str(node.val))
        if node.left:
            self.prettyPrintTree(node.left, prefix + (" " if isLeft else "│ "), True)
# Demo: build the tree for the classic example [3,2,1,6,0,5] and print it.
treelist = [3,2,1,6,0,5]
treeNode = Solution().constructMaximumBinaryTree(treelist)
Solution().prettyPrintTree(treeNode,"",True)
"anirban-mac@Anirbans-MacBook-Pro.local"
] | anirban-mac@Anirbans-MacBook-Pro.local |
821a27a6f20fe9d4a2cfabbae9f68ed0bca88dfd | b4972d81804f0095c72da5d08574afecdc4725f1 | /config/wsgi.py | 94ac6fdcac7ec32ff53473877f00d63276533ccf | [] | no_license | NLPDev/Solar_gentelella | edf2ae9c919ad91603e2661b0d251567d4a44486 | b9414990da148687b56fe4564ae933378c22ca5c | refs/heads/master | 2022-10-08T19:57:52.268335 | 2020-06-12T03:02:06 | 2020-06-12T03:02:06 | 271,695,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | """
WSGI config for solar project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# solar directory.
# Resolve the project root (the parent of this config/ package) so that
# apps living under <root>/solar can be imported without installation.
app_path = os.path.abspath(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
)
sys.path.append(os.path.join(app_path, "solar"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"vasile123andronic@gmail.com"
] | vasile123andronic@gmail.com |
d2cdfeaa05389cfeef257de3aa9711d8a69f9f6f | d7da288db4fd9fc0bb1c60c5074f290b5f70c8ef | /Aulas Python/Conteúdo das Aulas/020/Exercícios/Exercício 2.py | 185ddecbe92dcd8251ec5a48a006df70f0350f8b | [] | no_license | luizdefranca/Curso-Python-IgnoranciaZero | dbf4cf342b3f3efea6fb3b8cf27bf39ed92927e9 | 9fbf2f25e3e6fce1f1582af0bd6bc7dbc5b9f588 | refs/heads/master | 2020-04-09T07:17:00.735378 | 2016-09-12T10:51:37 | 2016-09-12T10:51:37 | 67,999,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | """
Faça um programa que calcule as raízes de uma equação do segundo grau,
na forma ax2 + bx + c. O programa deverá pedir os valores de a, b e c e fazer
as consistências, informando ao usuário nas seguintes situações:
   a. Se o usuário informar o valor de A igual a zero, a equação não é do
      segundo grau e o programa não deve pedir os demais valores,
      sendo encerrado;
b. Se o delta calculado for negativo, a equação não possui raizes reais.
Informe ao usuário e encerre o programa;
c. Se o delta calculado for igual a zero a equação possui apenas uma raiz
real; informe-a ao usuário;
d. Se o delta for positivo, a equação possui duas raiz reais; informe-as
ao usuário;
delta = b**2 - 4*a*c
raiz = (-b +ou-(delta**(1/2)))/(2*a)
"""
| [
"luizramospe@hotmail.com"
] | luizramospe@hotmail.com |
bb87629628ed14fb42b26c5249d5785feefed7b6 | 8d3fd439c6d5a52eda578847545234b2ebdc4f3b | /机器学习百科/pycode/ML-NLP/MachineLearning/Linear Regression/housing_price.py | 44536b78d7158b6ca2a4262ab10c88fedf26ab6e | [] | no_license | gm-p/practice_demo | d1530dcdb3de95832f1fa5b6e30c75e7ca6acc05 | 7eaa825fc634ad21aea48713133c0266a44ac54a | refs/heads/main | 2023-03-31T23:01:02.302579 | 2021-04-04T13:29:50 | 2021-04-04T13:29:50 | 354,193,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | import os
# Linear-regression demo on the King County housing data: load the training
# set, min-max scale the features, fit OLS, plot, and write test predictions.
import numpy as np
import pandas as pd
np.random.seed(36)  # fix the RNG seed for reproducible runs
import matplotlib
import seaborn
import matplotlib.pyplot as plt
from sklearn import datasets
housing = pd.read_csv('kc_train.csv', header=None)
target = housing.iloc[:, 1]  # column 1 holds the label (sale price)
housing.drop([1], inplace=True, axis=1)  # remove the label from the features
housing.columns = range(housing.shape[1])
print(housing.head())
print(target.head())
t = pd.read_csv('kc_test.csv', header=None)
# Data preprocessing: inspect dtypes / missing values
housing.info()
# Feature scaling to [0, 1]
from sklearn.preprocessing import MinMaxScaler
minmax_scaler = MinMaxScaler()
minmax_scaler.fit(housing)
scaler_housing = minmax_scaler.transform(housing)
scaler_housing = pd.DataFrame(scaler_housing, columns=housing.columns)
# NOTE(review): the test set is scaled with its own fitted scaler; normally
# the scaler fitted on the training set should be reused -- confirm intent.
mm = MinMaxScaler()
mm.fit(t)
scaler_t = mm.transform(t)
scaler_t = pd.DataFrame(scaler_t, columns=t.columns)
# Fit an ordinary least-squares linear regression model
from sklearn.linear_model import LinearRegression
lr_reg = LinearRegression()
lr_reg.fit(scaler_housing, target)
# Use mean squared error to judge the fit
from sklearn.metrics import mean_squared_error
preds = lr_reg.predict(scaler_housing)  # predict on the training features
mse = mean_squared_error(preds, target)  # inspect `mse` to evaluate the model
# Plot predictions against targets for a visual comparison
plt.figure(figsize=(10, 7))  # canvas size
num = 100
x = np.arange(1, num+1)  # compare the first 100 samples
plt.plot(x, target[:num], label='target')  # ground-truth values
plt.plot(x, preds[:num], label='preds')  # predicted values
plt.legend(loc='upper right')  # legend position
plt.show()
# Write predictions for the test set
result = lr_reg.predict(scaler_t)
df_result = pd.DataFrame(result)
df_result.to_csv('result.csv', index=False, header=False)
"abc"
] | abc |
e5a016a9d7842de12cf1230ba542c4a962e9c79d | 10b3f8b1bb2d43a053558e2974b1190ec5af9ab3 | /test/functional/rpc_bind.py | 5add6e40b9cff94c7002a194b59a6b71906076cc | [
"MIT"
] | permissive | Satoex/Sato | ff4683226c2cedb14203a86af68ae168e3c45400 | fda51ccc241ca426e838e1ba833c7eea26f1aedd | refs/heads/master | 2022-07-27T23:30:32.734477 | 2022-01-29T17:44:00 | 2022-01-29T17:44:00 | 346,001,467 | 6 | 8 | null | null | null | null | UTF-8 | Python | false | false | 5,058 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Sato Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running satod with the -rpcbind and -rpcallowip options."""
import socket
import sys
from test_framework.test_framework import SatoTestFramework, SkipTest
from test_framework.util import assert_equal, get_rpc_proxy, rpc_url, get_datadir_path, rpc_port, assert_raises_rpc_error
from test_framework.netutil import addr_to_hex, get_bind_addrs, all_interfaces
class RPCBindTest(SatoTestFramework):
    # Functional test: satod must honour the -rpcbind / -rpcallowip options.
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
    def setup_network(self):
        # Add the node without starting it; each sub-test starts and stops
        # it with its own bind/allow arguments.
        self.add_nodes(self.num_nodes, None)
    def run_bind_test(self, allow_ips, connect_to, addresses, expected):
        """
        Start a node with requested rpcallowip and rpcbind parameters,
        then try to connect, and check if the set of bound addresses
        matches the expected set.
        """
        self.log.info("Bind test for %s" % str(addresses))
        # Normalise the expected (addr, port) pairs to the hex form
        # reported by the netutil helpers.
        expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
        base_args = ['-disablewallet', '-nolisten']
        if allow_ips:
            base_args += ['-rpcallowip=' + x for x in allow_ips]
        binds = ['-rpcbind='+addr for addr in addresses]
        self.nodes[0].rpchost = connect_to
        self.start_node(0, base_args + binds)
        pid = self.nodes[0].process.pid
        # Compare what the process actually bound (order-insensitive).
        assert_equal(set(get_bind_addrs(pid)), set(expected))
        self.stop_nodes()
    def run_allowip_test(self, allow_ips, rpchost, rpcport):
        """
        Start a node with rpcallow IP, and request getnetworkinfo
        at a non-localhost IP.
        """
        self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
        base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
        self.nodes[0].rpchost = None
        self.start_nodes([base_args])
        # connect to node through non-loopback interface
        node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0, coverage_dir=self.options.coveragedir)
        node.getnetworkinfo()
        self.stop_nodes()
    def run_test(self):
        # due to OS-specific network stats queries, this test works only on Linux
        if not sys.platform.startswith('linux'):
            raise SkipTest("This test can only be run on linux.")
        # find the first non-loopback interface for testing
        non_loopback_ip = None
        for _, ip in all_interfaces():
            if ip != '127.0.0.1':
                non_loopback_ip = ip
                break
        if non_loopback_ip is None:
            raise SkipTest("This test requires at least one non-loopback IPv4 interface.")
        # Probe for IPv6 support by connecting a UDP socket to ::1.
        try:
            s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
            s.connect(("::1",1))
            s.close()
        except OSError:
            raise SkipTest("This test requires IPv6 support.")
        self.log.info("Using interface %s for testing" % non_loopback_ip)
        default_port = rpc_port(0)
        # check default without rpcallowip (IPv4 and IPv6 localhost)
        self.run_bind_test(None, '127.0.0.1', [],
            [('127.0.0.1', default_port), ('::1', default_port)])
        # check default with rpcallowip (IPv6 any)
        self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
            [('::0', default_port)])
        # check only IPv4 localhost (explicit)
        self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
            [('127.0.0.1', default_port)])
        # check only IPv4 localhost (explicit) with alternative port
        self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
            [('127.0.0.1', 32171)])
        # check only IPv4 localhost (explicit) with multiple alternative ports on same host
        self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
            [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
        # check only IPv6 localhost (explicit)
        self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
            [('::1', default_port)])
        # check both IPv4 and IPv6 localhost (explicit)
        self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
            [('127.0.0.1', default_port), ('::1', default_port)])
        # check only non-loopback interface
        self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
            [(non_loopback_ip, default_port)])
        # Check that with invalid rpcallowip, we are denied
        self.run_allowip_test([non_loopback_ip], non_loopback_ip, default_port)
        assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, default_port)
# Allow running this functional test directly from the command line.
if __name__ == '__main__':
    RPCBindTest().main()
| [
"78755872+Satoex@users.noreply.github.com"
] | 78755872+Satoex@users.noreply.github.com |
990e8157dbaa6f55997fe8deb4a9f13dc4544538 | 0061e0c95b8068568a4cfe575748bb188296ccc1 | /backend/users/migrations/0002_auto_20210219_1532.py | e86be94450eb2a079af956aaa4949e1d756b8d34 | [] | no_license | crowdbotics-apps/dimelo-24619 | cd7f1fb3693a8f48ae09f3815478723853d34dab | e17433535325b344128c7dfec7f3b79ae44d9db6 | refs/heads/master | 2023-03-07T00:05:14.598992 | 2021-02-19T15:33:15 | 2021-02-19T15:33:15 | 340,406,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | # Generated by Django 2.2.19 on 2021-02-19 15:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds audit timestamp fields to the custom
    # User model and relaxes several identity fields to be optional
    # (blank/null allowed).
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        # New audit columns.
        migrations.AddField(
            model_name='user',
            name='last_updated',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        migrations.AddField(
            model_name='user',
            name='timestamp_created',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        # Existing fields made optional.
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(blank=True, max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='first_name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='last_name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
708d0a15a49ca80a634e116056ef0ecf75446bac | 71012df2815a4666203a2d574f1c1745d5a9c6dd | /4 Django/solutions/blogproj/users/views.py | d1bf844a9039d17be4df416bcd6fbf344ced543f | [] | no_license | PdxCodeGuild/class_mouse | 6c3b85ccf5ed4d0c867aee70c46af1b22d20a9e8 | 40c229947260134a1f9da6fe3d7073bee3ebb3f7 | refs/heads/main | 2023-03-23T14:54:39.288754 | 2021-03-20T01:48:21 | 2021-03-20T01:48:21 | 321,429,925 | 1 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,900 | py | from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout
from django.http.response import HttpResponseRedirect
from django.shortcuts import render
from django.urls.base import reverse
from blog.models import BlogPost
# Create your views here.
def signup_user(request):
    """Register a new account.

    On POST, checks that the two password fields match and that the
    username is free, then creates the user, logs them in and redirects
    to the profile page.  On GET (or any validation failure) the signup
    form is rendered again.
    """
    if request.method == "POST":
        form = request.POST
        username = form['username']
        email = form['email']
        password = form['password']
        password2 = form['password2']
        # Bug fix: the confirmation field was read but never compared.
        if password != password2:
            print("Passwords do not match.")
        elif User.objects.filter(username=username).exists():
            print("User exists already.")
        else:
            user = User()
            user.username = username
            user.email = email
            user.set_password(password)  # stores a salted hash, not plaintext
            user.save()
            login(request, user)
            return HttpResponseRedirect(reverse('users:profile'))
    return render(request, 'users/signup.html')
def login_user(request):
    """Authenticate the posted credentials and log the user in.

    Redirects to the profile page on success; re-renders the login form
    on GET or on failed authentication.
    """
    if request.method == "POST":
        form = request.POST
        username = form['username']
        password = form['password']
        user = authenticate(request, username=username, password=password)
        # Removed leftover debug prints: they wrote the plaintext password
        # (and auth result) to stdout.
        if user is not None:
            login(request, user)
            return HttpResponseRedirect(reverse('users:profile'))
    return render(request, 'users/login.html')
def logout_user(request):
    """Log out the current user and send them to the login page."""
    logout(request)
    return HttpResponseRedirect(reverse('users:login'))
def profile(request):
    """Show the logged-in user's blog posts; anonymous users go to login."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('users:login'))
    # Only the posts belonging to the current user.
    blogs = BlogPost.objects.filter(user=request.user)
    context = {
        'blogs': blogs
    }
    return render(request, 'users/profile.html', context)
| [
"anthony@Anthonys-MBP.lan"
] | anthony@Anthonys-MBP.lan |
37088948ab97d7b4ad95cae9fe4ab71c0642f49b | 10d98fecb882d4c84595364f715f4e8b8309a66f | /hal/experiment_config/__init__.py | 7e2101a9f156beed9225c586e2942ebe94a987af | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 985 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init module of experiment config."""
from .image_standard_setting import image_standard_setting
from .state_standard_setting import state_standard_setting
from hal.utils.config import Config
collection = {
'state_standard': state_standard_setting,
'image_standard': image_standard_setting,
}
def get_exp_config(exp_name):
  """Return the experiment `Config` registered under `exp_name`.

  Args:
    exp_name: key into the module-level `collection` registry
      (e.g. 'state_standard' or 'image_standard').

  Raises:
    KeyError: if `exp_name` is unknown; the message lists the valid names
      (the exception type is unchanged so existing callers still work).
  """
  try:
    setting_fn = collection[exp_name]
  except KeyError:
    raise KeyError('Unknown experiment config {!r}; available: {}'.format(
        exp_name, sorted(collection)))
  return Config(setting_fn())
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
e057bda25b33a7f25156d496ddf7aa29c3c7caf3 | 6782a4d4c4406982a66d98466d448b5b8ea2d366 | /tensorflow/compiler/mlir/quantization/tensorflow/python/representative_dataset.py | f7aceee1484d7f21224427847ada64eccd1b1b91 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | MaiKuraki/tensorflow | 7afc33f4bc505c794f51dd6cbbfc44a3233ea986 | 46c4751f554e5e7f249cbe6642f5f874ad7614a4 | refs/heads/master | 2022-07-31T20:48:39.704555 | 2022-06-29T18:01:02 | 2022-06-29T18:06:10 | 183,984,120 | 0 | 0 | Apache-2.0 | 2019-04-29T02:11:07 | 2019-04-29T02:11:07 | null | UTF-8 | Python | false | false | 1,261 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines types required for representative datasets for quantization."""
from typing import Iterable, Mapping, Tuple, Union
from tensorflow.python.types import core
# A representative sample should be either:
#   1. a (signature_key, {input_key -> input_value}) tuple, or
#   2. a bare {input_key -> input_value} mapping.
RepresentativeSample = Union[Tuple[str, Mapping[str, core.TensorLike]],
                             Mapping[str, core.TensorLike]]
# A representative dataset is an iterable of representative samples,
# fed to the model one sample at a time during calibration.
RepresentativeDataset = Iterable[RepresentativeSample]
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
66f7a3f8de66cba4398c75fddb4dd631c1d28890 | 888f65551bb3fe1b8e84c205796b24678669a649 | /venv/lib/python3.7/site-packages/series/db.py | 19240d5a1c920be0fd897e64a552f1e28e07b4c9 | [] | no_license | chunharrison/NBA-Predictor | e6514c70f2cf26d6db4c14aee225cfbd9d5984a7 | 967951ba34debee012385af63f2bf8031dee51ca | refs/heads/master | 2022-05-04T22:02:03.374496 | 2019-05-15T05:55:34 | 2019-05-15T05:55:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,675 | py | import os
import threading
from pkg_resources import resource_filename
from sqlpharmacy.core import Database as SPDatabase
from sqlalchemy import Column, Integer, Boolean, String, Float
import alembic
from alembic.config import Config
from series.logging import Logging
from series.errors import InvalidDBError
class Database(SPDatabase, Logging):
    """sqlpharmacy Database wrapper with alembic schema migrations.

    Connects lazily, optionally auto-upgrades an outdated schema to
    ``head`` on construction, and exposes thin CRUD helpers around the
    scoped session.
    """
    def __init__(self, root_module, connection_string='sqlite:///',
                 connect=True, auto_upgrade=True, **kw):
        # root_module: package that ships alembic.ini and the alembic/ dir.
        self._root_module = root_module
        self.url = connection_string
        self.lock = threading.RLock()
        Database.register()
        # check_same_thread=False: the sqlite connection may be shared
        # across threads (callers are expected to serialise via self.lock).
        self._connect_args = dict(check_same_thread=False)
        self._db_args = kw
        self._setup_alembic()
        self._connected = False
        if connect:
            self.connect()
            if auto_upgrade and self._outdated:
                self.upgrade('head')
    def _setup_alembic(self):
        # Load alembic config/scripts from the root module's package data
        # and point them at this database's URL.
        ini = resource_filename(self._root_module, 'alembic.ini')
        self._alembic_cfg = Config(ini)
        script = resource_filename(self._root_module, 'alembic')
        self._alembic_cfg.set_main_option('script_location', script)
        self._alembic_cfg.set_main_option('sqlalchemy.url', self.url)
        self._alembic_script = alembic.script.ScriptDirectory.from_config(
            self._alembic_cfg)
    def connect(self, create=True):
        """Open the engine/session; create tables unless *create* is False."""
        super().__init__(self.url, connect_args=self._connect_args,
                         **self._db_args)
        if create:
            self.create_tables()
        self._connected = True
    def disconnect(self):
        """Mark disconnected and remove the scoped session."""
        self._connected = False
        self.session.remove()
    def query(self, *a, **kw):
        # Reconnect transparently if needed before querying.
        if not self._connected:
            self.connect()
        return self.session.query(*a, **kw)
    def add(self, data):
        # add + commit in one step (sqlpharmacy helper).
        self.session.add_then_commit(data)
    def delete(self, data):
        # delete + commit in one step (sqlpharmacy helper).
        self.session.delete_then_commit(data)
    def commit(self):
        if self._connected:
            self.session.commit()
        else:
            self.log.error('Tried to commit while not connected!')
    def upgrade(self, revision):
        """Migrate the schema to *revision* (also emits the SQL log)."""
        if not self._connected:
            self.connect()
        alembic.command.upgrade(self._alembic_cfg, revision)
        alembic.command.upgrade(self._alembic_cfg, revision, sql=True)
    @property
    def _outdated(self):
        # True when the db revision lags behind the newest script.
        return self._current_head != self._current_revision
    @property
    def _current_head(self):
        return self._alembic_script.get_current_head()
    @property
    def _current_revision(self):
        return self._migration_context.get_current_revision()
    @property
    def _migration_context(self):
        if not self._connected:
            self.connect()
        connection = self.session.connection()
        return alembic.migration.MigrationContext.configure(connection)
    def revision(self, message):
        ''' Autogenerate a migration file with upgrade/downgrade info by
            connecting to an outdated db without creating tables, applying
            all previous migrations (upgrading to 'head') and calling the
            alembic command 'revision'.
        '''
        self.connect(create=False)
        self.upgrade('head')
        alembic.command.revision(self._alembic_cfg, message=message,
                                 autogenerate=True)
class FileDatabase(Database):
    """Database backed by an sqlite file at *_path* (a pathlib.Path).

    Creates the parent directory if missing and remembers whether the
    file was new, which changes how migrations are applied (see
    :meth:`upgrade`).
    """
    def __init__(self, root_module, _path, **kw):
        self._path = _path
        self._check_path()
        connection_string = 'sqlite:///{}'.format(self._path)
        super().__init__(root_module, connection_string,
                         **kw)
    def _check_path(self):
        # Ensure the parent directory exists and the target is not a dir.
        _dir = self._path.parent
        if _dir and not _dir.is_dir():
            _dir.mkdir(parents=True, exist_ok=True)
        if self._path.is_dir():
            raise InvalidDBError('Is a directory!')
        # Remember whether the db file is being created from scratch.
        self._new_db = not self._path.is_file()
    def upgrade(self, revision):
        ''' If a nonexisting file has been specified as db, alembic will
            not set the revision number on creation. Thus, a complete
            migration history will be attempted on a database with current
            head, and fail. Check here if the file had existed before, and
            if not, only write the requested revision to the db. Otherwise,
            do the upgrade.
        '''
        if self._new_db:
            alembic.command.stamp(self._alembic_cfg, revision)
            alembic.command.stamp(self._alembic_cfg, revision, sql=True)
        else:
            super().upgrade(revision)
__all__ = ('Database', 'Column', 'Integer', 'Boolean', 'FileDatabase', 'String', 'Float')
| [
"wjsdntjr@hotmail.com"
] | wjsdntjr@hotmail.com |
991b6836fb9e29cd210ef5db268f2febb4e34bbb | 41ede4fd3bfba1bff0166bca7aee80dcf21434c6 | /vedat/dist/python-egenix-mx-base/actions.py | c1faa95bac5817768dc6a725b7a2096051d0a330 | [] | no_license | pisilinux/playground | a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c | e4e12fff8a847ba210befc8db7e2af8556c3adf7 | refs/heads/master | 2022-08-12T23:03:27.609506 | 2022-08-11T18:28:19 | 2022-08-11T18:28:19 | 8,429,459 | 16 | 22 | null | 2022-08-11T18:28:20 | 2013-02-26T09:37:11 | Python | UTF-8 | Python | false | false | 1,153 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import pisitools
from pisi.actionsapi import pythonmodules
from pisi.actionsapi import get
MX_DIR = "/usr/lib/%s/site-packages/mx" % get.curPYTHON()
EMPTY_DIRS = ["BeeBase/Doc", "DateTime/Doc", "Doc", "Queue/Doc", "Stack/Doc", "TextTools/Doc", "Tools/Doc", "UID/Doc", \
"URL/Doc", "DateTime/Examples", "TextTools/Examples"]
def install():
    # Install the python modules, then ship licenses/docs and relocate the
    # example scripts from the site-packages tree into the doc directory.
    pythonmodules.install()
    pisitools.dodoc("mx/LICENSE", "mx/COPYRIGHT")
    pisitools.dodoc("LICENSE", "COPYRIGHT", "MANIFEST", "README")
    # Make dir under docs for examples
    pisitools.dodir("%s/%s/Examples/DateTime" % (get.docDIR(), get.srcNAME()))
    pisitools.dodir("%s/%s/Examples/TextTools" % (get.docDIR(), get.srcNAME()))
    # Move examples from /usr/lib
    pisitools.domove("%s/DateTime/Examples/*.py" % MX_DIR, "%s/%s/Examples/DateTime/" % (get.docDIR(), get.srcNAME()))
    pisitools.domove("%s/TextTools/Examples/*.py" % MX_DIR, "%s/%s/Examples/TextTools/" % (get.docDIR(), get.srcNAME()))
| [
"vedat@pisi_linux1.0"
] | vedat@pisi_linux1.0 |
8d69b25e44042226f07ebb909716a8ce394a4844 | 91e810849c5680311ce51c9e66289afb0a76336c | /docs/source/conf.py | 2241ae833fcd4f0e8d257f2c255164791026f69e | [
"MIT"
] | permissive | zhanglixixi/pytool | 307b0505dfa6312223239d34c0dc6a03904e1df3 | 35fb633149b663e6a0fe69c666e5883172223b45 | refs/heads/master | 2022-03-13T08:43:27.572958 | 2019-12-16T07:54:13 | 2019-12-16T07:54:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,947 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
#
# Added by Zhi Liu
# for using Read the Docs theme
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'PyTool'
copyright = '2018, Zhi Liu'
author = 'Zhi Liu'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'  # Read the Docs theme (imported above)
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTooldoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyTool.tex', 'PyTool Documentation',
'Zhi Liu', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pytool', 'PyTool Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyTool', 'PyTool Documentation',
author, 'PyTool', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| [
"zhiliu.mind@gmail.com"
] | zhiliu.mind@gmail.com |
692da999c2b06adfee50cf10ed6fcc7a977ca421 | 70aa3a1cefde69909fd9a2ede9a4bdfbb7e434ad | /sobel.py | 36ea7cd7ce655d9c20410d4d2e870acffa768b1a | [] | no_license | cflin-cjcu/opencvttt | 512cf2ba70cc9d30cb97e733172d94396cfff56f | 07add34638fc043d02f09459246aa47975c5a9c7 | refs/heads/master | 2023-06-27T16:00:20.070365 | 2021-07-30T08:54:14 | 2021-07-30T08:54:14 | 389,538,395 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread('./images/test1.jpg',0)
sobel = cv.Sobel(img,cv.CV_8U,1,1,ksize=3)
sobelx = cv.Sobel(img,cv.CV_32F,1,0,ksize=3)
sobely = cv.Sobel(img,cv.CV_32F,0,1,ksize=3)
sobel2 = cv.Sobel(img,cv.CV_8U,1,1,ksize=5)
cv.imshow('img',img)
cv.imshow('sobel3',sobel)
cv.imshow('sobelx',sobelx)
cv.imshow('sobely',sobely)
cv.imshow('sobel5',sobel2)
cv.waitKey(0)
cv.destroyAllWindows() | [
"cflin@mail.cjcu.edu.tw"
] | cflin@mail.cjcu.edu.tw |
7dd64a3c50ed789c5881b6291252c71d5b8ced14 | 91d7987874dcfa0d8dbbd9a3a3831ed9b67691f8 | /yamaxun/yamaxun/pipelines.py | 8013054e6fa3e4bd1a8179243b752ee656e01dd5 | [] | no_license | lsz1995/amazon | c9388cc78f8465804b53e8759940ebc9625cbdd6 | e648ff21f642632e30925ffab1d3a4608eb201ca | refs/heads/master | 2020-03-19T05:55:23.726649 | 2018-06-04T05:31:22 | 2018-06-04T05:31:22 | 135,974,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
from yamaxun.items import YamaxunItem
class YamaxunPipeline(object):
    # Default no-op pipeline: passes every item through unchanged.
    def process_item(self, item, spider):
        return item
class mongoPipeline(object):
    """Persist YamaxunItem objects into a MongoDB collection.

    Connection parameters come from the Scrapy settings via
    ``from_crawler``; the spider lifecycle hooks open/close the client.
    """
    collection = 'huawei'
    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
    @classmethod
    def from_crawler(cls, crawler):
        # NOTE(review): the settings key is spelled 'MONGO_RUI'; it must
        # match settings.py -- confirm it is not a typo for 'MONGO_URI'.
        return cls(
            mongo_uri=crawler.settings.get('MONGO_RUI'),
            mongo_db=crawler.settings.get('MONGO_DB')
        )
    # Called automatically when the spider starts.
    def open_spider(self, spider):
        self.client = MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]
    # Called automatically when the spider closes.
    def close_spider(self, spider):
        self.client.close()
    def process_item(self, item, spider):
        if isinstance(item, YamaxunItem):
            table = self.db[self.collection]
            data = dict(item)
            # Upsert keyed on the product ID so re-crawls update in place.
            # update_one(..., upsert=True) replaces the deprecated
            # Collection.update(..., True) call.
            table.update_one({'ID': item["ID"]}, {'$set': data}, upsert=True)
        # Bug fix: a Scrapy pipeline must return the item (not a string)
        # so later pipeline stages receive it.
        return item
| [
"qqlsz87@126.com"
] | qqlsz87@126.com |
ed5ee8aa6d39cf4b84696afef021cd774a662000 | edfb435ee89eec4875d6405e2de7afac3b2bc648 | /branches/os_x_native_events/py/selenium/webdriver/firefox/webdriver.py | bcd92df9ba2c6fe70e1a5d3cb1a663660412ab26 | [
"Apache-2.0"
] | permissive | Escobita/selenium | 6c1c78fcf0fb71604e7b07a3259517048e584037 | f4173df37a79ab6dd6ae3f1489ae0cd6cc7db6f1 | refs/heads/master | 2021-01-23T21:01:17.948880 | 2012-12-06T22:47:50 | 2012-12-06T22:47:50 | 8,271,631 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,145 | py | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import httplib
from selenium.webdriver.common.exceptions import ErrorInResponseException
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.firefox.firefoxlauncher import FirefoxLauncher
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.extensionconnection import ExtensionConnection
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import urllib2
import socket
class WebDriver(RemoteWebDriver):
    """The main interface to use for testing,
    which represents an idealised web browser."""
    def __init__(self, profile=None, timeout=30):
        """Creates a webdriver instance.
        Args:
          profile: a FirefoxProfile object (it can also be a profile name,
             but the support for that may be removed in future, it is
             recommended to pass in a FirefoxProfile object)
          timeout: the amount of time to wait for extension socket
        """
        # Pick a free port first so the profile can be configured with it.
        port = self._free_port()
        self.browser = FirefoxLauncher()
        if type(profile) == str:
            # This is to be Backward compatible because we used to take a
            # profile name
            profile = FirefoxProfile(name=profile, port=port)
        if not profile:
            profile = FirefoxProfile(port=port)
        self.browser.launch_browser(profile)
        RemoteWebDriver.__init__(self,
            command_executor=ExtensionConnection(timeout),
            desired_capabilities=DesiredCapabilities.FIREFOX)
    def _free_port(self):
        # Ask the OS for an ephemeral port by binding port 0, then release it
        # so the browser extension can bind it shortly after.
        port = 0
        free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        free_socket.bind((socket.gethostname(), 0))
        port = free_socket.getsockname()[1]
        free_socket.close()
        return port
    def _execute(self, command, params=None):
        # Delegate to the remote driver but tolerate repeated close()/quit()
        # calls (legacy API contract); returns None when the error is swallowed.
        try:
            return RemoteWebDriver.execute(self, command, params)
        except ErrorInResponseException, e:
            # Legacy behavior: calling close() multiple times should not raise
            # an error
            if command != Command.CLOSE and command != Command.QUIT:
                raise e
        except urllib2.URLError, e:
            # Legacy behavior: calling quit() multiple times should not raise
            # an error
            if command != Command.QUIT:
                raise e
    def create_web_element(self, element_id):
        """Override from RemoteWebDriver to use firefox.WebElement."""
        return WebElement(self, element_id)
    def quit(self):
        """Quits the driver and close every associated window."""
        try:
            RemoteWebDriver.quit(self)
        except httplib.BadStatusLine:
            # Happens if Firefox shutsdown before we've read the response from
            # the socket.
            pass
        self.browser.kill()
    def save_screenshot(self, filename):
        """
        Gets the screenshot of the current window. Returns False if there is
        any IOError, else returns True. Use full paths in your filename.
        """
        png = self._execute(Command.SCREENSHOT)['value']
        try:
            # NOTE(review): f is not closed if write() raises IOError;
            # consider opening the file in a with-block.
            f = open(filename, 'w')
            f.write(base64.decodestring(png))
            f.close()
        except IOError:
            return False
        finally:
            del png
        return True
| [
"simon.m.stewart@07704840-8298-11de-bf8c-fd130f914ac9"
] | simon.m.stewart@07704840-8298-11de-bf8c-fd130f914ac9 |
9e4b4043b10db5279e165da2fd0224214758405e | 8b2e795c3040a2ef1d3f0c21752bec57a0614bd6 | /venv/Scripts/pilfont.py | bbb82dc78d2a2729dce7cbeafae8e6fb6cfc97ae | [] | no_license | harshit8858/NHDO | c75e244dfdc91817b3047d65c7be610f3e18aba3 | 6a5ea2de4ba607c20c0b9bd241e6b1c82090eba9 | refs/heads/master | 2023-01-06T20:18:33.795898 | 2018-01-03T07:39:04 | 2018-01-03T07:39:04 | 105,629,451 | 1 | 3 | null | 2022-12-20T22:32:34 | 2017-10-03T08:26:57 | Python | UTF-8 | Python | false | false | 1,057 | py | #!c:\users\harshi~1\nhdo\venv\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
import glob
import sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
VERSION = "0.4"

if len(sys.argv) <= 1:
    print("PILFONT", VERSION, "-- PIL font compiler.")
    print()
    print("Usage: pilfont fontfiles...")
    print()
    print("Convert given font files to the PIL raster font format.")
    print("This version of pilfont supports X BDF and PCF fonts.")
    sys.exit(1)

# Expand any glob patterns in the arguments (shells on some platforms
# pass them through unexpanded).
files = []
for f in sys.argv[1:]:
    files = files + glob.glob(f)

for f in files:
    print(f + "...", end=' ')
    try:
        fp = open(f, "rb")
        try:
            # Try the PCF parser first; fall back to BDF when the header
            # does not match.
            try:
                p = PcfFontFile.PcfFontFile(fp)
            except SyntaxError:
                fp.seek(0)
                p = BdfFontFile.BdfFontFile(fp)
            p.save(f)
        finally:
            # Bug fix: the original never closed fp, leaking one file
            # handle per input font.
            fp.close()
    except (SyntaxError, IOError):
        print("failed")
    else:
        print("OK")
| [
"harshit8858@gmail.com"
] | harshit8858@gmail.com |
df9a72c962406e7277fb7cc77a71ebf225417ea8 | 635e0b896b75b7d496f60368ae4e1a8b8b41546a | /api/v1/views/index.py | 703b0f26519138ec448b84b06203f7aa84340147 | [
"LicenseRef-scancode-public-domain"
] | permissive | SimonBr017/AirBnB_clone_v3 | 3add07db83b11b606084c27ce271ece9f5e255ff | 9427f89680c12fdc9f2b4b30f3a7a2ad2405cbb4 | refs/heads/main | 2023-08-28T10:29:58.841924 | 2021-09-20T17:57:33 | 2021-09-20T17:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | #!/usr/bin/python3
""" Blueprint routes """
from api.v1.views import app_views
from flask import jsonify
from models import storage
@app_views.route('/status', strict_slashes=False)
def status():
    """ Return the status in JSON format """
    # Health-check endpoint: always reports OK while the API is running.
    return jsonify({'status': 'OK'})
@app_views.route('/stats', strict_slashes=False)
def stats():
    """ Retrieves the number of each objects by type """
    # Wrapped in jsonify() for consistency with /status: an explicit JSON
    # response object works on every Flask version (returning a bare dict
    # from a view requires Flask >= 1.1).
    return jsonify({"amenities": storage.count('Amenity'),
                    "cities": storage.count('City'),
                    "places": storage.count('Place'),
                    "reviews": storage.count('Review'),
                    "states": storage.count('State'),
                    "users": storage.count('User')
                    })
| [
"etiennebrxv@gmail.com"
] | etiennebrxv@gmail.com |
43363719bdcfc095aafb427a6bd230358c843e28 | 8b53a8b9803d92003f3a3a9e1b08def7642ba35d | /TALLERES/TAL3_while_for_20210217_cur/2_while_1hastan.py | 61c8e9c8dec335c9e77d6c5eca291edc005ba68a | [] | no_license | smarulan613/fundamentos_prog_20211_sebasmc | 637cdf9e1f61de0f876fe74530df4e6a5b40d6a6 | 0a87d81dae2bd5656a3e6a521585da661efe6cf6 | refs/heads/main | 2023-05-02T04:26:47.035698 | 2021-05-27T03:37:05 | 2021-05-27T03:37:05 | 356,059,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 17 19:38:09 2021
@author: R005
"""
# Ask the user for a positive end value and print the numbers from 1 up to
# that value, one by one. (Comment translated from Spanish; the input prompt
# below is a runtime string and is kept as-is.)
n=int(input("Ingrese el valor final:"))
x=1
while x<=n:
    print(x)
| [
"noreply@github.com"
] | smarulan613.noreply@github.com |
b19873da3ddd42c1513de7fe80fb969ef2c415d5 | db6b79665d35eb6a44c81c68b01e947b534e308d | /code/setup.py | ca4507f4acf8084c73ad663dd5573ebaa1eae431 | [] | no_license | davidwhogg/crushinator | ffe4536b04a1239dcf04334ea6975d367c2ac0ca | 4f0f11503af6bab5ee4498cb6981ddf8b8307339 | refs/heads/master | 2018-12-29T00:38:20.145527 | 2014-01-11T19:47:42 | 2014-01-11T19:47:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # python setup.py build_ext --inplace --rpath=...
import os
from distutils.core import setup, Extension
from Cython.Distutils import build_ext
# Two Cython extensions: 'interpolation' links against GSL (hard-coded local
# install paths), 'flux_calculation' is plain Cython with no extra libraries.
ext = [Extension('interpolation', ['./crushinator/interpolation.pyx'],
                 libraries=['gsl', 'gslcblas'],
                 library_dirs=['/home/rfadely/local/lib/'],
                 include_dirs=['/home/rfadely/local/include/', '.']),
       Extension('flux_calculation', ['./crushinator/flux_calculation.pyx'])]
setup(cmdclass={'build_ext':build_ext}, ext_modules=ext)
# Move the built shared objects into the package directory so they are
# importable as crushinator.interpolation / crushinator.flux_calculation.
os.system('mv interpolation.so ./crushinator/')
os.system('mv flux_calculation.so ./crushinator/')
| [
"rossfadely@gmail.com"
] | rossfadely@gmail.com |
2367b8d14a26b5f8da01129ea04dcd71079fae39 | 6aab2d11b3ab7619ee26319886dcfc771cbcaba5 | /0x08-python-more_classes/2-rectangle.py | c7653413c47b30e26a5abc2a2c2a3e0c806a6936 | [] | no_license | IhebChatti/holbertonschool-higher_level_programming | ef592f25eb077e182a0295cb5f2f7d69c7a8ab67 | ca58262c6f82f98b2022344818e20d382cf82592 | refs/heads/master | 2022-12-18T10:06:30.443550 | 2020-09-24T17:31:30 | 2020-09-24T17:31:30 | 259,174,423 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,085 | py | #!/usr/bin/python3
"""Defining a rectangle
"""
class Rectangle:
    """Model a rectangle with validated, non-negative integer sides."""

    def __init__(self, width=0, height=0):
        """Build a rectangle.

        Keyword Arguments:
            width {int} -- horizontal side length (default: {0})
            height {int} -- vertical side length (default: {0})
        """
        self.height = height
        self.width = width

    @property
    def width(self):
        """Retrieve the horizontal side length.

        Returns:
            [int] -- the current width
        """
        return self.__width

    @width.setter
    def width(self, value):
        """Validate and store the horizontal side length.

        Raises:
            TypeError -- when value is not an integer
            ValueError -- when value is negative
        """
        if isinstance(value, int):
            if value < 0:
                raise ValueError('width must be >= 0')
            self.__width = value
        else:
            raise TypeError('width must be an integer')

    @property
    def height(self):
        """Retrieve the vertical side length.

        Returns:
            [int] -- the current height
        """
        return self.__height

    @height.setter
    def height(self, value):
        """Validate and store the vertical side length.

        Raises:
            TypeError -- when value is not an integer
            ValueError -- when value is negative
        """
        if isinstance(value, int):
            if value < 0:
                raise ValueError('height must be >= 0')
            self.__height = value
        else:
            raise TypeError('height must be an integer')

    def area(self):
        """Compute the surface of the rectangle.

        Returns:
            [int] -- width times height
        """
        return self.__width * self.__height

    def perimeter(self):
        """Compute the outline length of the rectangle.

        Returns:
            [int] -- twice the sum of the sides, or 0 when either side is 0
        """
        if 0 in (self.__width, self.__height):
            return 0
        return 2 * (self.__width + self.__height)
| [
"iheb.chatti@holbertonschool.com"
] | iheb.chatti@holbertonschool.com |
5ea3fa316976d2ea5d4af97f47e4e3e72025754f | 5a424888b89eb2b480cbe1bc484e5ed95427f2e1 | /plot/a_dzliu_code_Plot_CSFRD.py | 07a6c14b8fb5e0fb20988373719e910d55cf8b27 | [] | no_license | 1054/DeepFields.GalaxyModelling | 07f7d33b987613d01a900126d4720f96b3964414 | 5cd31e80782f079040577d94d046ab0728f0c3bc | refs/heads/master | 2023-01-01T09:18:27.188092 | 2020-10-25T22:48:33 | 2020-10-25T22:48:33 | 107,806,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,771 | py | #!/usr/bin/env python
#
import os, sys, re, json, numpy, astropy, matplotlib, subprocess
#matplotlib.use('Qt5Agg')
from astropy.table import Table
from astropy import units as u
from matplotlib import pyplot as plt
from matplotlib import ticker as ticker
import numpy as np
from pprint import pprint
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.725)
sys.path.append('/Users/dzliu/Cloud/GitLab/AlmaCosmos/Plot/Common_Python_Code')
from setup_matplotlib import setup_matplotlib; setup_matplotlib()
#matplotlib.rcParams['text.usetex'] = True
#matplotlib.rcParams['text.latex.preamble'] = [r'\usepackage[cm]{sfmath}']
#matplotlib.rcParams['font.sans-serif'] = 'cm'
#matplotlib.rcParams['font.family'] = 'sans-serif'
from calc_galaxy_main_sequence import (
calc_SFR_MS_Speagle2014,
calc_SFR_MS_Sargent2014,
calc_SFR_MS_Whitaker2014,
calc_SFR_MS_Bethermin2015,
calc_SFR_MS_Schreiber2015,
calc_SFR_MS_Lee2015,
calc_SFR_MS_Tomczak2016,
calc_SFR_MS_Pearson2018,
calc_SFR_MS_Leslie20180901,
calc_SFR_MS_Leslie20190111,
calc_SFR_MS_Leslie20190515,
calc_SFR_MS_Leslie20190710,
calc_SFR_MS_Leslie20191212,
calc_SFR_MS_Scoville2017,
)
from calc_cosmic_star_formation_rate_density import (calc_CSFRD_Madau2014, convert_age_to_z)
#
# User Setting
#
#obs_area = 7200*u.arcmin*u.arcmin # 1.4*1.4*u.deg*u.deg
#obs_area = 1.4*1.4*u.deg*u.deg
obs_area = 1.5546582999901375*u.deg*u.deg
#obs_area = 2.0*u.deg*u.deg
print('obs_area = %s [%s]'%(obs_area.to(u.arcmin*u.arcmin).value, obs_area.to(u.arcmin*u.arcmin).unit))
print('obs_area = %s [%s]'%(obs_area.to(u.steradian).value, obs_area.to(u.steradian).unit))
#print('obs_area = %s [%s]'%(7200 * 3600 / 4.25451703e10, 'steradian')) # checked consistent
#
# Read data points
#
tb = Table.read('datatable_generated_galaxies_with_coordinates.fits')
#print(tb.colnames)
#print(tb['MSTAR'].data.shape)
#print(tb['MSTAR'][0][0], tb['SFR'][0][0])
data_lgMstar = tb['lgMstar'].data.flatten()
data_Mstar = 10**data_lgMstar
data_lgSFR = tb['lgSFR'].data.flatten()
data_SFR = 10**data_lgSFR
data_redshift = tb['z'].data.flatten()
#sys.exit()
#
# def
#
def tick_function(X):
    """Map an array of redshifts X to cosmic-age tick labels (Gyr, 1 decimal)."""
    V = cosmo.age(X).value
    return ['%0.1f' % t for t in V]
#
# fig
#
fig = plt.figure(figsize=(6.8,4.8))
fig.subplots_adjust(left=0.15, right=0.95, bottom=0.105, top=0.885)
ax1 = fig.add_subplot(1,1,1)
ax1.set_xlabel('Redshift', fontsize=16, labelpad=1)
ax1.set_ylabel(r'$\log_{10} \, \rho_{\mathrm{SFR}}$ [$\mathrm{M_{\odot}\,yr^{-1}\,Mpc^{-3}}$]', fontsize=17, labelpad=15)
ax1.tick_params(axis='both', labelsize=14)
ax1.tick_params(direction='in', axis='both', which='both')
ax1.tick_params(top=False, right=True, which='both')
my_tick_locations = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
ax1.set_xticks(my_tick_locations)
#ax1.set_xlim([-0.3, np.max(my_tick_locations)])
ax1.set_xlim([-0.2, 5.5])
ax1.set_ylim([-2.0, -0.2])
ax1.grid(True, ls='--', lw=0.25)
#new_tick_locations = convert_age_to_z([13.7, 2.0, 1.0, 0.5, 0.3]) #<20190915><BUGGY>#
new_tick_locations = convert_age_to_z([cosmo.age(0).value, 5.0, 3.0, 2.0, 1.0, 0.7, ])
new_tick_locations = new_tick_locations[np.argwhere(np.logical_and(new_tick_locations >= ax1.get_xlim()[0], new_tick_locations <= ax1.get_xlim()[1])).flatten()]
print('new_tick_locations', new_tick_locations)
ax2 = ax1.twiny()
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(new_tick_locations)
ax2.set_xticklabels(tick_function(new_tick_locations))
ax2.set_xlabel(r"Cosmic Age [$\mathrm{Gyr}$]", fontsize=16, labelpad=6)
ax2.minorticks_off()
ax2.grid(None)
# show y minor ticks
ax1.yaxis.set_major_locator(ticker.MultipleLocator(0.2))
ax1.yaxis.set_minor_locator(ticker.MultipleLocator(0.05))
#ax1.yaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=99))
#ax1.yaxis.set_minor_locator(ticker.LogLocator(base=10.0, subs=np.arange(2,10)*0.1, numticks=99))
#
# z
z_edges = np.linspace(0.0, 6.0, num=30, endpoint=True) # np.array([0.02, 0.25, 0.50, 0.75, 1.00, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, 6.0])
z_centers = (z_edges[0:-1] + z_edges[1:]) / 2.0
lgPhi_SFR = z_centers * 0.0 - 99
#
# loop z bin
for i in range(len(z_edges)-1):
#
print('z %s - %s'%(z_edges[i], z_edges[i+1]))
#
z = z_centers[i]
#
#comoving_volume = ((cosmo.comoving_volume(z_edges[i+1]) - cosmo.comoving_volume(z_edges[i])) / (4.0*np.pi*u.steradian) * obs_area.to(u.steradian))
#print('comoving_volume = %e [%s]'%(comoving_volume.value, comoving_volume.unit))
#
differntial_z_list = np.linspace(z_edges[i], z_edges[i+1], num=10, endpoint=True)
comoving_volume = np.sum((cosmo.differential_comoving_volume(differntial_z_list[1:]) * np.diff(differntial_z_list) * obs_area.to(u.steradian)))
print('comoving_volume = %e [%s]'%(comoving_volume.value, comoving_volume.unit))
#
# select and count CSFRD
data_selection = np.logical_and.reduce((data_redshift >= z_edges[i], data_redshift < z_edges[i+1], data_SFR > 0, data_lgMstar >= 9.0))
data_Phi_SFR = np.sum(data_SFR[data_selection]) / comoving_volume.value
lgPhi_SFR[i] = np.log10(data_Phi_SFR)
#
#
Phi_SFR_MD14 = calc_CSFRD_Madau2014(z_centers)
lgPhi_SFR_MD14 = np.log10(Phi_SFR_MD14)
ax1.plot(z_centers, lgPhi_SFR_MD14, c='red', ls='solid', solid_capstyle='butt', alpha=0.8, lw=2, label=r'SMF MD14')
#
#
plot_label = r'dzliu model (lgMstar$\gtrsim$9.0)'
current_dir = os.path.basename(os.getcwd())
if re.match(r'^.*_using_([a-zA-Z0-9]+)_MS([_].*|)$', current_dir):
plot_label = plot_label + '\n(%s MS)'%(re.sub(r'^.*_using_([a-zA-Z0-9]+)_MS([_].*|)$', r'\1', current_dir))
ax1.step(z_centers, lgPhi_SFR, where='mid', alpha=0.6, label=plot_label)
#
#
#plot_legend1 = plt.legend(\
# legend_handles,
# legend_labels,
# fontsize=16, loc='upper right',
# #borderpad=0.6, borderaxespad=0.6, handlelength=2.8,
# )
#ax1.add_artist(plot_legend1)
ax1.legend(loc='upper left', ncol=2, framealpha=0.5)
#
# savefig
fig.savefig('Plot_CSFRD.pdf', transparent=True)
print('Output to "%s"!' % ('Plot_CSFRD.pdf') )
os.system('open "%s"' % ('Plot_CSFRD.pdf') )
| [
"liudz1054@gmail.com"
] | liudz1054@gmail.com |
fd846adca759b0c9b2a65e2e835dc49ca3f384b1 | 0bb4e169444ae71b3d64522f94141a7a00054111 | /userid_to_slug.py | 2b7b1f25806f1a8740f94cc3c660ca6b7d14e5ad | [
"CC0-1.0"
] | permissive | riceissa/ea-forum-reader | 5cf02f52f767c28417a64b4b76fc57c07ab0f011 | 4c8b76b8947b41bc8f3b3067e75ef7870eb4fa6b | refs/heads/master | 2022-05-05T22:21:48.011316 | 2022-03-12T19:01:36 | 2022-03-12T19:01:36 | 156,928,401 | 11 | 3 | NOASSERTION | 2018-11-24T07:06:07 | 2018-11-09T23:16:13 | Python | UTF-8 | Python | false | false | 308 | py | #!/usr/bin/env python3
import sys
import util
# For some reason, the Algolia search result JSON only has the user ID and
# username, not the user slug. So to be able to link to the user page from
# search results, we need to convert the userid to a user slug.
# Usage: userid_to_slug.py <userid>  (prints the slug to stdout)
print(util.userid_to_userslug(sys.argv[1]))
| [
"riceissa@gmail.com"
] | riceissa@gmail.com |
f2ca82e1a1bd182480593aac57fe76d0675b56f5 | e45d2faad9389886a82ff5176853b1ff6e37caae | /argparse/047_argparse_conflict_handler.py | 4098c31734537ec1c7e84ff61f17359e57119df6 | [] | no_license | allenmo/python_study | 6320aa4cd80fe46ccf73076015c67bdcb6338d30 | 7aff5d810ca6e791d62235d57c072a8dc14457ca | refs/heads/master | 2021-03-24T12:00:33.079530 | 2016-11-22T23:35:58 | 2016-11-22T23:35:58 | 55,770,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | import argparse
# 'resolve' lets a later add_argument() with a clashing option string override
# the earlier action instead of raising an ArgumentError.
parser = argparse.ArgumentParser(prog='PROG', conflict_handler='resolve')
parser.add_argument('-f', '--foo', help='old foo help')
# Re-registers --foo: the old action keeps only '-f'; --foo now maps here.
parser.add_argument('--foo', help='new foo help')
parser.print_help()
| [
"allen02403@gmail.com"
] | allen02403@gmail.com |
65daa93551bf2b8891afdd05edfb2d723e291f96 | f188379dc9c1e5b63e432d434c782a4d6997872b | /5_List Advanced/Lab/03. Palindrome Strings.py | 59d10bcd5a3064551f8fafa8f18d719c67768992 | [] | no_license | GalyaBorislavova/SoftUni_Python_Fundamentals_January_2021 | 39d7eb8c28f60ff3c293855b074c49ac622a6036 | 7d479fd6c8e4136fb07b765458cc00088e09767a | refs/heads/main | 2023-06-15T04:16:17.084825 | 2021-06-30T18:05:42 | 2021-06-30T18:05:42 | 381,785,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | words = input().split(" ")
searched_palindrome = input()
# A palindrome reads the same forwards and backwards.
palindromes = [p for p in words if p == p[::-1]]
print(palindromes)
print(f"Found palindrome {palindromes.count(searched_palindrome)} times") | [
"galyaborislavova888@gmail.com"
] | galyaborislavova888@gmail.com |
db618d8285a21d6d5d76d68ec2af44087c421572 | 1725fddc758271af6ce911dfe7dcca3f2bb3233d | /service/workflow/workflow_custom_notice_service.py | e8f59ece0a9ac0dd4f12bc77c9fbd59998e0775c | [
"MIT"
] | permissive | blackholll/loonflow | 7948018324664d6a88df0616275ad4ce28400745 | b0e236b314286c5f6cc6959622c9c8505e776443 | refs/heads/master | 2023-08-22T21:32:08.223136 | 2023-06-04T13:39:27 | 2023-06-04T13:39:27 | 120,720,556 | 1,864 | 733 | MIT | 2023-07-05T23:24:18 | 2018-02-08T06:26:53 | Python | UTF-8 | Python | false | false | 5,191 | py | import json
from django.db.models import Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from apps.workflow.models import CustomNotice
from service.base_service import BaseService
from service.common.constant_service import constant_service_ins
from service.common.log_service import auto_log
class WorkflowCustomNoticeService(BaseService):
    """
    Workflow notification service: CRUD helpers for CustomNotice records.
    """
    def __init__(self):
        pass

    @classmethod
    @auto_log
    def get_notice_list(cls, query_value: str, page: int, per_page: int, simple: bool=False)->tuple:
        """
        Get a page of notice records.
        :param query_value: fuzzy-matched against name and description
        :param page: 1-based page number
        :param per_page: records per page
        :param simple: when True, strip credential/secret fields from each record
        :return: (record dict list, paging info dict)
        """
        query_params = Q(is_deleted=False)
        if query_value:
            query_params &= Q(name__contains=query_value) | Q(description__contains=query_value)
        custom_notice_querset = CustomNotice.objects.filter(query_params).order_by('id')
        paginator = Paginator(custom_notice_querset, per_page)
        try:
            custom_notice_result_paginator = paginator.page(page)
        except PageNotAnInteger:
            custom_notice_result_paginator = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver last page of results
            custom_notice_result_paginator = paginator.page(paginator.num_pages)
        custom_notice_result_object_list = custom_notice_result_paginator.object_list
        custom_notice_result_restful_list = []
        for custom_notice_result_object in custom_notice_result_object_list:
            per_notice_data = custom_notice_result_object.get_dict()
            if simple:
                # Drop secrets so simple list views never leak credentials.
                per_notice_data.pop("corpid")
                per_notice_data.pop("corpsecret")
                per_notice_data.pop("appkey")
                per_notice_data.pop("appsecret")
                per_notice_data.pop("hook_url")
                per_notice_data.pop("hook_token")
            custom_notice_result_restful_list.append(per_notice_data)
        return custom_notice_result_restful_list, dict(per_page=per_page, page=page, total=paginator.count)

    @classmethod
    @auto_log
    def add_custom_notice(cls, name: str, description: str, type_id: int, corpid: str, corpsecret: str, appkey: str,
                          appsecret: str, hook_url: str, hook_token: str, creator: str)->tuple:
        """
        Create a custom notice record.
        :param name: notice name
        :param description: notice description
        :param type_id: notice type id
        :param corpid: corp id (wechat-style credential)
        :param corpsecret: corp secret
        :param appkey: app key
        :param appsecret: app secret
        :param hook_url: webhook url
        :param hook_token: webhook token
        :param creator: username of the creator
        :return: (True, dict containing the new notice_id)
        """
        notice_obj = CustomNotice(name=name, description=description, type_id=type_id, corpid=corpid,
                                  corpsecret=corpsecret, appkey=appkey, appsecret=appsecret, hook_url=hook_url,
                                  hook_token=hook_token, creator=creator)
        notice_obj.save()
        return True, dict(notice_id=notice_obj.id)

    @classmethod
    @auto_log
    def update_custom_notice(cls, custom_notice_id: int, name: str, description: str, type_id: int,
                             corpid: str, corpsecret: str, appkey: str, appsecret: str, hook_url: str,
                             hook_token: str)->tuple:
        """
        Update a custom notice record.
        :param custom_notice_id: primary key of the record
        :param name: notice name
        :param description: notice description
        :param type_id: notice type id
        :param corpid: corp id
        :param corpsecret: corp secret
        :param appkey: app key
        :param appsecret: app secret
        :param hook_url: webhook url
        :param hook_token: webhook token
        :return: (success flag, error message or empty string)
        """
        custom_notice_obj = CustomNotice.objects.filter(id=custom_notice_id, is_deleted=0)
        if custom_notice_obj:
            custom_notice_obj.update(name=name, description=description, hook_url=hook_url, hook_token=hook_token,
                                     type_id=type_id, corpid=corpid, corpsecret=corpsecret, appkey=appkey,
                                     appsecret=appsecret)
        else:
            return False, 'the record is not existed or has been deleted'
        return True, ''

    @classmethod
    @auto_log
    def del_custom_notice(cls, custom_notice_id: int)->tuple:
        """
        Soft-delete a custom notice (sets is_deleted, keeps the row).
        :param custom_notice_id: primary key of the record
        :return: (success flag, error message or empty string)
        """
        custom_notice_obj = CustomNotice.objects.filter(id=custom_notice_id, is_deleted=0)
        if custom_notice_obj:
            custom_notice_obj.update(is_deleted=True)
            return True, ''
        else:
            return False, 'the record is not exist or has been deleted'

    @classmethod
    @auto_log
    def get_notice_detail(cls, custom_notice_id: int)->tuple:
        """
        Get the detail of one notice record.
        :param custom_notice_id: primary key of the record
        :return: (success flag, detail dict or error message)
        """
        custom_notice_obj = CustomNotice.objects.filter(id=custom_notice_id, is_deleted=0).first()
        if custom_notice_obj:
            custom_notice_info = custom_notice_obj.get_dict()
            return True, custom_notice_info
        else:
            return False, 'record is not exist or has been deleted'
workflow_custom_notice_service_ins = WorkflowCustomNoticeService()
| [
"blackholll@163.com"
] | blackholll@163.com |
483133ff3733c8c7954f199313fc21a841f27a54 | ad38b9a924911b3249b9ffec01d78a2b1048fa0d | /动态调试/Immunity Debugger v1.73/Lib/test/test_class.py | 97e262adbfbd6054364005f392020d097da930ce | [] | no_license | h3len/HackerToolBox | 77c5a45553784d20104db21ac5fe8f840ca519a6 | 4397b0c25cfd0eb3f92484f396745cc664af2531 | refs/heads/master | 2020-04-04T22:57:47.376773 | 2018-10-10T15:43:06 | 2018-10-10T15:50:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,813 | py | "Test the functionality of Python classes implementing operators."
from test.test_support import TestFailed
testmeths = [
# Binary operations
"add",
"radd",
"sub",
"rsub",
"mul",
"rmul",
"div",
"rdiv",
"mod",
"rmod",
"divmod",
"rdivmod",
"pow",
"rpow",
"rshift",
"rrshift",
"lshift",
"rlshift",
"and",
"rand",
"or",
"ror",
"xor",
"rxor",
# List/dict operations
"contains",
"getitem",
"getslice",
"setitem",
"setslice",
"delitem",
"delslice",
# Unary operations
"neg",
"pos",
"abs",
# generic operations
"init",
]
# These need to return something other than None
# "coerce",
# "hash",
# "str",
# "repr",
# "int",
# "long",
# "float",
# "oct",
# "hex",
# These are separate because they can influence the test of other methods.
# "getattr",
# "setattr",
# "delattr",
class AllTests:
def __coerce__(self, *args):
print "__coerce__:", args
return (self,) + args
def __hash__(self, *args):
print "__hash__:", args
return hash(id(self))
def __str__(self, *args):
print "__str__:", args
return "AllTests"
def __repr__(self, *args):
print "__repr__:", args
return "AllTests"
def __int__(self, *args):
print "__int__:", args
return 1
def __float__(self, *args):
print "__float__:", args
return 1.0
def __long__(self, *args):
print "__long__:", args
return 1L
def __oct__(self, *args):
print "__oct__:", args
return '01'
def __hex__(self, *args):
print "__hex__:", args
return '0x1'
def __cmp__(self, *args):
print "__cmp__:", args
return 0
def __del__(self, *args):
print "__del__:", args
# Synthesize AllTests methods from the names in testmeths.
method_template = """\
def __%(method)s__(self, *args):
print "__%(method)s__:", args
"""
for method in testmeths:
exec method_template % locals() in AllTests.__dict__
del method, method_template
# this also tests __init__ of course.
testme = AllTests()
# Binary operations
testme + 1
1 + testme
testme - 1
1 - testme
testme * 1
1 * testme
if 1/2 == 0:
testme / 1
1 / testme
else:
# True division is in effect, so "/" doesn't map to __div__ etc; but
# the canned expected-output file requires that __div__ etc get called.
testme.__coerce__(1)
testme.__div__(1)
testme.__coerce__(1)
testme.__rdiv__(1)
testme % 1
1 % testme
divmod(testme,1)
divmod(1, testme)
testme ** 1
1 ** testme
testme >> 1
1 >> testme
testme << 1
1 << testme
testme & 1
1 & testme
testme | 1
1 | testme
testme ^ 1
1 ^ testme
# List/dict operations
1 in testme
testme[1]
testme[1] = 1
del testme[1]
testme[:42]
testme[:42] = "The Answer"
del testme[:42]
testme[2:1024:10]
testme[2:1024:10] = "A lot"
del testme[2:1024:10]
testme[:42, ..., :24:, 24, 100]
testme[:42, ..., :24:, 24, 100] = "Strange"
del testme[:42, ..., :24:, 24, 100]
# Now remove the slice hooks to see if converting normal slices to slice
# object works.
del AllTests.__getslice__
del AllTests.__setslice__
del AllTests.__delslice__
import sys
if sys.platform[:4] != 'java':
testme[:42]
testme[:42] = "The Answer"
del testme[:42]
else:
# This works under Jython, but the actual slice values are
# different.
print "__getitem__: (slice(0, 42, None),)"
print "__setitem__: (slice(0, 42, None), 'The Answer')"
print "__delitem__: (slice(0, 42, None),)"
# Unary operations
-testme
+testme
abs(testme)
int(testme)
long(testme)
float(testme)
oct(testme)
hex(testme)
# And the rest...
hash(testme)
repr(testme)
str(testme)
testme == 1
testme < 1
testme > 1
testme <> 1
testme != 1
1 == testme
1 < testme
1 > testme
1 <> testme
1 != testme
# This test has to be last (duh.)
del testme
if sys.platform[:4] == 'java':
import java
java.lang.System.gc()
# Interfering tests
class ExtraTests:
def __getattr__(self, *args):
print "__getattr__:", args
return "SomeVal"
def __setattr__(self, *args):
print "__setattr__:", args
def __delattr__(self, *args):
print "__delattr__:", args
testme = ExtraTests()
testme.spam
testme.eggs = "spam, spam, spam and ham"
del testme.cardinal
# return values of some method are type-checked
class BadTypeClass:
def __int__(self):
return None
__float__ = __int__
__long__ = __int__
__str__ = __int__
__repr__ = __int__
__oct__ = __int__
__hex__ = __int__
def check_exc(stmt, exception):
    """Raise TestFailed if executing 'stmt' does not raise 'exception'
    """
    try:
        exec stmt
    except exception:
        # Expected: the statement raised the required exception type.
        pass
    else:
        raise TestFailed, "%s should raise %s" % (stmt, exception)
check_exc("int(BadTypeClass())", TypeError)
check_exc("float(BadTypeClass())", TypeError)
check_exc("long(BadTypeClass())", TypeError)
check_exc("str(BadTypeClass())", TypeError)
check_exc("repr(BadTypeClass())", TypeError)
check_exc("oct(BadTypeClass())", TypeError)
check_exc("hex(BadTypeClass())", TypeError)
# mixing up ints and longs is okay
class IntLongMixClass:
def __int__(self):
return 0L
def __long__(self):
return 0
try:
int(IntLongMixClass())
except TypeError:
raise TestFailed, "TypeError should not be raised"
try:
long(IntLongMixClass())
except TypeError:
raise TestFailed, "TypeError should not be raised"
# Test correct errors from hash() on objects with comparisons but no __hash__
class C0:
pass
hash(C0()) # This should work; the next two should raise TypeError
class C1:
def __cmp__(self, other): return 0
check_exc("hash(C1())", TypeError)
class C2:
def __eq__(self, other): return 1
check_exc("hash(C2())", TypeError)
# Test for SF bug 532646
class A:
pass
A.__call__ = A()
a = A()
try:
a() # This should not segfault
except RuntimeError:
pass
else:
raise TestFailed, "how could this not have overflowed the stack?"
# Tests for exceptions raised in instance_getattr2().
def booh(self):
raise AttributeError, "booh"
class A:
a = property(booh)
try:
A().a # Raised AttributeError: A instance has no attribute 'a'
except AttributeError, x:
if str(x) != "booh":
print "attribute error for A().a got masked:", str(x)
class E:
__eq__ = property(booh)
E() == E() # In debug mode, caused a C-level assert() to fail
class I:
__init__ = property(booh)
try:
I() # In debug mode, printed XXX undetected error and raises AttributeError
except AttributeError, x:
pass
else:
print "attribute error for I.__init__ got masked"
# Test comparison and hash of methods
class A:
def __init__(self, x):
self.x = x
def f(self):
pass
def g(self):
pass
def __eq__(self, other):
return self.x == other.x
def __hash__(self):
return self.x
class B(A):
pass
a1 = A(1)
a2 = A(2)
assert a1.f == a1.f
assert a1.f != a2.f
assert a1.f != a1.g
assert a1.f == A(1).f
assert hash(a1.f) == hash(a1.f)
assert hash(a1.f) == hash(A(1).f)
assert A.f != a1.f
assert A.f != A.g
assert B.f == A.f
assert hash(B.f) == hash(A.f)
# the following triggers a SystemError in 2.4
a = A(hash(A.f.im_func)^(-1))
hash(a.f)
| [
"redleavessun@gmail.com"
] | redleavessun@gmail.com |
307765967f950baa6009e7ca57e4b343db27a692 | 17f122497a3fb90105cb35b08b8ba4d2379831b5 | /papyrobot/utils/answer.py | 799e31d5ed4b7e2f9affa095110742d0a317dc8e | [] | no_license | Zepmanbc/oc_dapython_pr7 | 198084d7e9c37a8949186c3ff4f89d59eaf54d2f | dc2d38ba2164669167423c80d94b52d7257a548e | refs/heads/master | 2020-04-25T17:39:01.008881 | 2019-04-17T20:33:16 | 2019-04-17T20:33:16 | 172,956,325 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | #! /usr/bin/env python
"""Random Answer module"""
import json
from random import randint
class Answer():
    """Serve random canned sentences for the bot's dialog.

    Usage:
        answer = Answer()
        print(answer.response(category))

    Known categories (see 'papyrobot/static/json/dialog.json'):
        intro, introduce_story, no_result
    """
    def __init__(self):
        """Load the dialog sentences from the bundled JSON file."""
        with open('papyrobot/static/json/dialog.json') as json_data:
            self.dialog = json.load(json_data)

    def response(self, category):
        """Return a random sentence from *category*.

        Args:
            category (str): one of the keys of self.dialog.

        Returns:
            str: a sentence picked at random from the category.

        Raises:
            KeyError: if the category is unknown.
        """
        if category not in self.dialog.keys():
            raise KeyError("Incorrect Category")
        sentences = self.dialog[category]
        return sentences[randint(0, len(sentences) - 1)]
if __name__ == "__main__":
# answer = Answer()
# print(answer.response('ff'))
pass
| [
"zepman@gmail.com"
] | zepman@gmail.com |
df182ed949bc897da31a5f60c843f4350b9ca785 | 9d7d69178c6f1f1db6ed6767e0af32bfe836549c | /new_workspace/Gumtree_Workspace/UsefulPythonScripts/plexi_flux_v_lambda.py | 9ddf49092d9dae358e8d1a007d8d733b52982742 | [] | no_license | Gumtree/Quokka_scripts | 217958288b59adbdaf00a9a13ece42f169003889 | c9687d963552023d7408a8530005a99aabea1697 | refs/heads/master | 2023-08-30T20:47:32.142903 | 2023-08-18T03:38:09 | 2023-08-18T03:38:09 | 8,191,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,517 | py | from time import sleep
from gumpy.commons import sics
# Wavelength scan: step the neutron velocity selector through each wavelength
# and acquire a fixed-time measurement at every step.  The original script
# spelled out every step by hand; the loop below performs the identical
# sequence of drive/scan calls in the same order.
#
# The driveAtt(...) calls were already commented out in the original; the
# attenuation values they recorded per wavelength band are kept for reference:
#   driveAtt(270): 4.505 - 5.9 A
#   driveAtt(240): 6.0   - 7.0 A
#   driveAtt(210): 7.5   - 8.5 A
#   driveAtt(180): 9.0   - 9.5 A
#   driveAtt(150): 10    - 12  A
#   driveAtt(90):  17 A
#   driveAtt(60):  20 A and above
WAVELENGTHS = [
    4.505, 4.6, 4.7, 4.8, 4.9,
    5, 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 5.7, 5.8, 5.9,
    6, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8, 6.9,
    7, 7.5, 8, 8.5, 9, 9.5,
    10, 11, 12, 17, 20, 25, 40,
]

SCAN_SECONDS = 120    # acquisition time per wavelength step
SETTLE_SECONDS = 0.1  # short pause before/after each scan, as in the original

for wavelength in WAVELENGTHS:
    # Drive the velocity selector to the requested wavelength.
    sics.drive('nvs_lambda', wavelength)
    sleep(SETTLE_SECONDS)
    # print('x') is equivalent to the original py2 print statement here.
    print('start scanning for ')
    quokka.scan("time", SCAN_SECONDS)
    sleep(SETTLE_SECONDS)
| [
"quokka@DAV1-QUOKKA.nbi.ansto.gov.au"
] | quokka@DAV1-QUOKKA.nbi.ansto.gov.au |
0685786ef1c40ebd5e6c8ea6eed8236dd947972b | 5e7cacad2e9c0b6cc8677412b8d1bce92d01f197 | /realefforttask/pages.py | 47cd26df58797bd8b62258f2483729039e32b526 | [] | no_license | chapkovski/real-effort-several-rounds | 3acb55a8396aae82a8e8609641f38e16e546df64 | 8df230066aa4d085b7d1dfcc36774749348bf038 | refs/heads/master | 2018-12-24T17:30:44.617345 | 2018-05-10T16:06:20 | 2018-05-10T16:06:20 | 117,924,197 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | from otree.api import Currency as c, currency_range
from . import models
from ._builtin import Page, WaitPage
from .models import Constants
class WorkPage(Page):
    """Real-effort task page shown each round with a countdown timer."""

    # Label displayed next to the countdown timer.
    timer_text = 'Time left to complete this round:'
    # The page auto-submits after the per-round task time configured in Constants.
    timeout_seconds = Constants.task_time

    def before_next_page(self):
        # Persist the participant's completed tasks before leaving the page.
        self.player.dump_tasks()
# Order of pages shown each round; this app has a single work page.
page_sequence = [
    WorkPage,
]
| [
"chapkovski@gmail.com"
] | chapkovski@gmail.com |
6c4ba86895231e56bc3c8162d45342e283b07bdb | 0a1f8957a798006deaa53d10d09f733fab1e6b05 | /analysis_tools/iFAB/psu_python_library/ifab_cost_surrogate_steel.py | 707fafbbb492bfb3fb23b3c9e4ed5146c8fd1915 | [
"LicenseRef-scancode-other-permissive"
] | permissive | metamorph-inc/meta-core | a89504ccb1ed2f97cc6e792ba52e3a6df349efef | bc7a05e04c7901f477fe553c59e478a837116d92 | refs/heads/master | 2023-03-07T02:52:57.262506 | 2023-03-01T18:49:49 | 2023-03-01T18:49:49 | 40,361,476 | 25 | 15 | NOASSERTION | 2023-01-13T16:54:30 | 2015-08-07T13:21:24 | Python | UTF-8 | Python | false | false | 756 | py | import json
from sys import argv
from sys import stderr
# Read the testbench manifest; fall back to an empty skeleton when the file
# is missing so the script still runs stand-alone from the command line.
try:
    with open("testbench_manifest.json", "r+") as j:
        manifest = json.load(j)
except IOError:
    manifest = {"Parameters": [], "Metrics": []}

# The mass comes from the "MassInput" parameter when present (the last
# matching entry wins, matching the original scan order); otherwise it is
# taken from the first command-line argument.
mass = None
for entry in manifest["Parameters"]:
    if entry["Name"] == "MassInput":
        mass = entry["Value"]
if mass is None:
    mass = float(argv[1])

# Linear steel-cost surrogate: a fixed offset plus a rate proportional to mass.
surrogate_cost = 15059 + 4.4715 * float(mass)

# Write the result back into every matching metric entry.
for entry in manifest["Metrics"]:
    if entry["Name"] == "surrogate_cost":
        entry["Value"] = surrogate_cost
        entry["Unit"] = "dollars"

with open("testbench_manifest.json", "w") as json_file:
    json.dump(manifest, json_file, indent=4)

# Echo the computed cost on stderr for the caller.
stderr.write("{0:.2f}\n".format(surrogate_cost))
| [
"kevin.m.smyth@gmail.com"
] | kevin.m.smyth@gmail.com |
6432db83cc0fcd16c5619e9d4e0a4e53f18d699b | ffbfb86db9dac89c1cc24e648b199a8d3db9850f | /python/python_split.py | 5c303afc89c76d0523735efbb2f7ea483c3279d4 | [] | no_license | attikis/programming | e7ecef5c2bf2af71a3e89e6156a4a934fb2ed724 | 4b6215021d6ca1effd0f18ecfe8afc67056b7098 | refs/heads/master | 2021-05-09T21:51:40.425627 | 2019-02-19T07:22:56 | 2019-02-19T07:22:56 | 118,735,207 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | #!/usr/bin/env python
# Permissions : chmod +x fileName.py
# To launch : python fileName.py OR ./fileName.py
# Definition : split() command used to split up a string
string1 = "pt1,pt2,pt3,pt4"
print "+++ string1 = ", string1
print '+++ string1.split(",") = %s' % (string1.split(","))
print
string2 = "pt1|pt2|pt3|pt4"
print "+++ string2 = ", string2
print '+++ string2.split("|") = %s' % (string2.split("|"))
print
string3 = "pt1 pt2 pt3 pt4"
print "+++ string3 = ", string3
print '+++ string3.split(" ") = %s' % (string3.split(" "))
print
string4 = "pt1 <**> pt2 <**> pt3 <**> pt4"
print "+++ string4 = ", string4
print '+++ string4.split(" <**> ") = %s' % (string4.split(" <**> "))
print '+++ string4.split(" <**> ", 1) = %s' % (string4.split(" <**> ",1))
print '+++ string4.split(" <**> ", 2) = %s' % (string4.split(" <**> ",2))
print
string5 = "My name is Alexandros Attikis"
print "+++ string5 = ", string5
print "+++ string5.split()", string5.split()
print '+++ string5.split(" ")', string5.split(" ")
print
mLineString = "This \n is \n a multiline \n string"
print "+++ mLineString = ", mLineString
print "+++ mLineString.split() = ", mLineString.split()
print "+++ mLineString.splitlines() = ", mLineString.splitlines()
print
print "+++ Printing all characters in a multi-line string"
for line in mLineString:
print line.split()
print
print "+++ Printing all words in a multi-line string (in separate lists)"
for line in mLineString.splitlines():
print line.split()
print
print "+++ Printing all words in a multi-line string (in one list)"
print "+++ mLineString.splitlines() = ", mLineString.splitlines()
| [
"attikis@cern.ch"
] | attikis@cern.ch |
695a3b083fd4b572001864fc33c6a61d4be8573d | b523cbb45c34e9c0f1d94fce9f03d654e18f57ab | /instance_manager.py | bef0fed7e98b69508cc10e4b5957e689b5b26358 | [] | no_license | brittainhard/gpuci-scripts | 0d2d27156486724dbe80a73f31b5076bcef4324b | 27429a89b3a3564acf372fe30d6de2f59c9997c0 | refs/heads/master | 2020-03-26T00:16:38.664253 | 2018-08-29T17:29:19 | 2018-08-29T17:29:19 | 144,313,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,250 | py | import os, json, argparse, time
import datetime, dateutil
from jenkinsapi import jenkins
import requests
import boto3
# Jenkins jobs that never run on the GPU instance; get_jobs() filters these
# out so they do not count toward keeping the instance alive.
NON_GPU_JOBS = [
    "goai-docker-container-builder",
    "gpu-instance-manager",
    "gpu-instance-spawner"
]
def get_instances():
    """Materialize the module-level EC2 resource's instance iterator as a list."""
    return [inst for inst in rs.instances.iterator()]
def instance_is_running(instance):
    """Return True when the instance's EC2 state code is 16 ('running')."""
    RUNNING_STATE_CODE = 16  # EC2 instance-state code for 'running'
    return instance.state["Code"] == RUNNING_STATE_CODE


def get_running_instances(instances):
    """Filter *instances* down to those currently in the 'running' state."""
    # Comprehension replaces the original manual append loop; same result.
    return [inst for inst in instances if instance_is_running(inst)]
def get_gpu_instance(instances):
    """Return the first instance launched from the GPU AMI, or None."""
    matches = (inst for inst in instances if inst.image.id == AMI)
    return next(matches, None)
def attach_elastic_ip(instance):
    """Associate the pre-allocated Elastic IP with *instance*.

    API errors are printed rather than raised, preserving the original
    best-effort behaviour.
    """
    try:
        response = cl.associate_address(AllocationId=ELASTIC_IP,
                                        InstanceId=instance.id)
        print(response)
    # Bug fix: ClientError was never imported anywhere in this file, so the
    # old `except ClientError` itself raised NameError on any API failure.
    # boto3 clients expose the botocore ClientError via `.exceptions`.
    except cl.exceptions.ClientError as e:
        print(e)
def create_gpu_instance(dry_run=False):
    """Place a one-time spot request for a single GPU instance.

    Fire-and-forget: spawn_instances() polls separately until the instance
    actually comes up.
    """
    # Bug fix: the EC2 API declares SpotPrice as a *string*; SPOT_PRICE is
    # parsed as a float at module level, so convert it here.  The response
    # was previously bound to an unused local and is simply ignored.
    cl.request_spot_instances(
        DryRun=dry_run,
        InstanceCount=1,
        SpotPrice=str(SPOT_PRICE),
        Type="one-time",
        LaunchSpecification={
            "ImageId": AMI,
            "KeyName": "goai-gpuci",
            "SecurityGroupIds": [SECURITY_GROUP],
            "InstanceType": INSTANCE_SIZE,
            "Placement": {
                "AvailabilityZone": "us-east-2b"
            }
        }
    )
def spawn_instances(dry_run=False):
    """Ensure one GPU instance is running and holds the Elastic IP.

    No-op when a GPU instance already exists; otherwise place a spot
    request and poll every 5 s until the instance reports running, then
    attach the Elastic IP.
    """
    gpu = get_gpu_instance(get_running_instances(get_instances()))
    if gpu:
        # Already up: nothing to do.
        return
    # (The original `elif not gpu:` branch was redundant — it always ran.)
    create_gpu_instance(dry_run)
    instance = None
    while not instance:
        print("Not Running.")
        time.sleep(5)
        instance = get_gpu_instance(get_running_instances(get_instances()))
    print("Instance created.")
    attach_elastic_ip(instance)
    print("Elastic IP Attached.")
    time.sleep(5)
def get_jobs():
    """Return the Jenkins jobs that may run on the GPU instance.

    Jobs named in NON_GPU_JOBS are excluded, and duplicate job names are
    dropped while preserving first-seen order.
    """
    jenk = jenkins.Jenkins(JENKINS_URL)
    jobs = []
    # Track names already accepted (plus the non-GPU jobs) in a set instead
    # of re-stringifying the whole jobs list on every iteration (was O(n^2)).
    excluded = set(NON_GPU_JOBS)
    for item in jenk.items():
        name = str(item[1])
        if name in excluded:
            continue
        excluded.add(name)
        jobs.append(item[1])
    return jobs
def jobs_running(jobs):
    """Return True if any of the given Jenkins jobs is currently building."""
    # Generator avoids building an intermediate list and lets any()
    # short-circuit on the first running job.
    return any(job.is_running() for job in jobs)
def time_difference(instance):
    """Return the instance's uptime within the current hour as a datetime.time.

    NOTE(review): only minutes and seconds survive — timedelta.seconds
    excludes whole days, and the computed `hours` value is discarded — so
    the result wraps every hour for long-running instances.
    close_to_next_hour() appears to rely on exactly this wrap-around to
    detect the approach of each hourly boundary; do not "fix" the dropped
    hours without checking that caller.
    """
    tm = datetime.datetime.now(tz=dateutil.tz.tz.tzutc()) - instance.launch_time
    hours, remainder = divmod(tm.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return datetime.time(minute=minutes, second=seconds)
def close_to_next_hour(instance):
    """Return (is_close, minutes_left) relative to the next whole hour of uptime."""
    minutes_left = 60 - time_difference(instance).minute
    is_close = minutes_left <= 2
    return is_close, minutes_left
def manage_instances(dry_run=False, terminate_instance=False):
    """Decide whether the GPU instance should be terminated.

    Termination happens when explicitly requested, or when no Jenkins job
    is running and the instance is within 2 minutes of its next whole hour
    of uptime.  With dry_run=True the decision is logged but not acted on.
    """
    # Query Jenkins first (side effect preserved from the original order).
    any_jobs = jobs_running(get_jobs())
    instance = get_gpu_instance(get_running_instances(get_instances()))

    # Explicit terminate request wins over everything else.
    if instance and terminate_instance:
        instance.terminate()
        return
    if not instance:
        print("Instance is not running.")
        return

    is_close, minutes_left = close_to_next_hour(instance)
    if not is_close:
        print("Instance not yet ready to be stopped.")
        print("%d minutes left" % minutes_left)
        return
    if any_jobs:
        print("Jobs are still running")
        return

    if not dry_run:
        print("Terminating instance")
        instance.terminate()
    return
if __name__ == "__main__":
    # --- Configuration: every knob comes from the environment. ---
    SECURITY_GROUP = os.environ.get("SECURITY_GROUP", "")
    AMI = os.environ.get("AMI", "")
    # Used as an AllocationId by attach_elastic_ip(), not as an IP address.
    ELASTIC_IP = os.environ.get("ELASTIC_IP", "")
    INSTANCE_SIZE = os.environ.get("INSTANCE_SIZE", "")
    JENKINS_URL = os.environ.get("JENKINS_URL", "")
    AWS_CREDENTIALS_URL = os.environ.get("AWS_CREDENTIALS_URL", "")
    SPOT_PRICE = float(os.environ.get("SPOT_PRICE", "0.0"))

    # --- Temporary AWS credentials fetched from an HTTP endpoint that
    # returns AccessKeyId / SecretAccessKey / Token JSON. ---
    r = requests.get(AWS_CREDENTIALS_URL)
    creds = json.loads(r.text)
    AWS_KEY_ID = creds["AccessKeyId"]
    AWS_KEY = creds["SecretAccessKey"]
    AWS_SESSION_TOKEN = creds["Token"]

    session = boto3.Session(
        aws_access_key_id=AWS_KEY_ID,
        aws_secret_access_key=AWS_KEY,
        aws_session_token=AWS_SESSION_TOKEN,
        region_name="us-east-2"
    )
    # Module-level handles used by the helper functions above.
    rs = session.resource('ec2')
    cl = session.client('ec2')

    parser = argparse.ArgumentParser("Spawns instances and checks for instance statuses.")
    parser.add_argument("--spawn-instances", dest="instance_spawner",
                        action="store_true", default=False)
    parser.add_argument("--manage-instances", dest="instance_manager",
                        action="store_true", default=False)
    parser.add_argument("--dry-run", dest="dry_run",
                        action="store_true", default=False)
    parser.add_argument("--terminate-instance", dest="terminate",
                        action="store_true", default=False)
    args = parser.parse_args()
    # The two modes are mutually exclusive and exactly one must be chosen.
    if args.instance_spawner and args.instance_manager:
        exit("Cannot spawn and manage instances at the same time.")
    elif not args.instance_spawner and not args.instance_manager:
        exit("Please specify either --spawn-instances or --manage-instances.")
    elif args.instance_spawner:
        spawn_instances(dry_run=args.dry_run)
        exit(0)
    elif args.instance_manager:
        manage_instances(dry_run=args.dry_run, terminate_instance=args.terminate)
        exit(0)
| [
"brittainhard@gmail.com"
] | brittainhard@gmail.com |
7aea99a375d5f22517415ab30c8a1a7ada5ba817 | 58aade23413d23f0d4666d7da3766ccbf820d0e1 | /bk/script/summarize_script/summary_phs.py | 1f480209775dfd6bf6a9fff5eb203d679377c5dc | [] | no_license | friend1ws/PDL1_pipeline | 661837ad4f3c791439fcbae3ca32db47b2b6e8a2 | 79bb55297dac04c808577d51f8714a34fe9dad74 | refs/heads/master | 2020-04-15T02:32:43.931129 | 2016-05-04T06:47:20 | 2016-05-04T06:47:20 | 41,615,248 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | #! /usr/bin/env python
import sys, glob
# Summarize CD274 (PD-L1) expression ratio and fusion status per sample.
# Usage: summary_phs.py <sample_list> <expression_dir> <fusion_dir>
sampleList = sys.argv[1]
expDir = sys.argv[2]
fusionDir = sys.argv[3]
hIN = open(sampleList, 'r')
for line in hIN:
    F = line.rstrip('\n').split('\t')
    sample = F[0]
    # Per-sample result directories are matched by sample-name prefix.
    targetExpDir = glob.glob(expDir + "/" + sample + "*")
    targetFusionDir = glob.glob(fusionDir + "/" + sample + "*")
    expRatio = "---"
    if len(targetExpDir) > 0:
        hIN2 = open(targetExpDir[0] + "/CD274.exon.exp.txt", "r")
        # Defaults of 10.0 make the ratio 1.0 when a row is absent —
        # presumably a neutral sentinel; confirm against the data format.
        tmp1 = 10.0
        tmp2 = 10.0
        # NOTE(review): this inner loop reuses (clobbers) the outer loop
        # variable `line`; harmless because the outer file iterator is
        # independent, but easy to trip over when editing.
        for line in hIN2:
            F2 = line.rstrip('\n').split('\t')
            if F2[0] == "CD274_3": tmp1 = float(F2[1])
            if F2[0] == "CD274_UTR": tmp2 = float(F2[1])
        # Ratio of the "CD274_3" value to the "CD274_UTR" value.
        expRatio = tmp1 / tmp2
    fusion = "---"
    if len(targetFusionDir) > 0:
        hIN2 = open(targetFusionDir[0] + "/star.fusion.result.txt", "r")
        # Any row mentioning CD274 in column 8 or 9 marks the sample
        # fusion-positive; otherwise it is marked False below.
        for line in hIN2:
            F2 = line.rstrip('\n').split('\t')
            if F2[8] == "CD274" or F2[9] == "CD274":
                fusion = True
        if fusion == "---": fusion = False
    # Emit one TSV row per sample for which both inputs were found.
    if expRatio != "---" and fusion != "---":
        print sample + '\t' + str(expRatio) + '\t' + str(fusion)
| [
"friend1ws@gmail.com"
] | friend1ws@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.